diff --git a/.gitattributes b/.gitattributes index b118c4abf5e0121e99c7dceb3b31e5b79c89e538..bb4a46055cfc015476aff82e0f66144d55cd6475 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1292,3 +1292,11 @@ data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_origin.pdf data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_content_list.json b/data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..ae0b7feda1e207187e74f63625bbb6ee84246533 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_content_list.json @@ -0,0 +1,2402 @@ +[ + { + "type": "text", + "text": "The current version is 'Preprint'.", + "bbox": [ + 91, + 69, + 315, + 83 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This work has been submitted to the IEEE for possible publication. 
Copyright may be transferred without notice, after which this version may no longer be accessible.", + "bbox": [ + 78, + 85, + 488, + 128 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This information aligns with the guidelines available at:", + "bbox": [ + 93, + 130, + 470, + 143 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://journals.ieeeauthorcenter.ieee.org/become-an-ieee-journal-author/publishing-ethics/guidelines-and-policies/post-publication-policies/", + "bbox": [ + 76, + 146, + 483, + 189 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 911, + 31, + 919, + 40 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05657v2 [eess.AS] 26 Oct 2025", + "bbox": [ + 24, + 268, + 57, + 728 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Nes2Net: A Lightweight Nested Architecture for Foundation Model Driven Speech Anti-spoofing", + "text_level": 1, + "bbox": [ + 106, + 70, + 893, + 140 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Tianchi Liu, Student Member, Duc-Tuan Truong, Student Member, Rohan Kumar Das, Senior Member, Kong Aik Lee, Senior Member, Haizhou Li, Fellow", + "bbox": [ + 112, + 147, + 883, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Abstract—Speech foundation models have significantly advanced various speech-related tasks by providing exceptional representation capabilities. However, their high-dimensional output features often create a mismatch with downstream task models, which typically require lower-dimensional inputs. A common solution is to apply a dimensionality reduction (DR) layer, but this approach increases parameter overhead, computational costs, and risks losing valuable information. To address these issues, we propose Nested Res2Net (Nes2Net), a lightweight back-end architecture designed to directly process high-dimensional features without DR layers. The nested structure enhances multi-scale feature extraction, improves feature interaction, and preserves high-dimensional information. We first validate Nes2Net on CtrSVDD, a singing voice deepfake detection dataset, and report a $22\%$ performance improvement and an $87\%$ back-end computational cost reduction over the state-of-the-art baseline. Additionally, extensive testing across four diverse datasets: ASVspoof 2021, ASVspoof 5, PartialSpoof, and In-the-Wild, covering fully spoofed speech, adversarial attacks, partial spoofing, and real-world scenarios, consistently highlights Nes2Net's superior robustness and generalization capabilities. The code package and pre-trained models are available at https://github.com/Liu-Tianchi/Nes2Net.", + "bbox": [ + 73, + 234, + 491, + 525 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Index Terms—DeepFake detection, speech anti-spoofing, Res2Net, Nes2Net, SSL, speech foundation model", + "bbox": [ + 75, + 532, + 488, + 560 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "I. INTRODUCTION", + "text_level": 1, + "bbox": [ + 215, + 583, + 351, + 597 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "SPEECH foundation models, such as wav2vec 2.0 [1], HuBERT [2], and WavLM [3], have revolutionized speech processing by leveraging large-scale pretraining to capture complex acoustic and linguistic patterns [4]. 
This has driven notable advances in automatic speech recognition (ASR) [5], speaker verification (SV) [6], and other speech applications.", + "bbox": [ + 73, + 604, + 491, + 696 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Beyond traditional tasks, speech foundation models also show great promise in addressing critical security concerns, particularly speech anti-spoofing (also referred to as deepfake detection) [7]. With the growing sophistication of spoofing techniques, such as voice conversion, ensuring the reliability", + "bbox": [ + 73, + 696, + 491, + 772 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Tianchi Liu and Haizhou Li are with the Department of Electrical and Computer Engineering, National University of Singapore, Singapore. Tianchi Liu is also with LIGHTSPEED, Singapore (email: tianchi.liu@u.nus.edu);", + "bbox": [ + 73, + 784, + 491, + 818 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Duc-Tuan Truong is with the Nanyang Technological University, Singapore (email: truongdu001@e.ntu.edu.sg);", + "bbox": [ + 73, + 818, + 491, + 840 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Rohan Kumar Das is with the Fortemedia Singapore, Singapore (email: ecerohan@gmail.com);", + "bbox": [ + 73, + 840, + 491, + 864 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Kong Aik Lee is with the Department of Electrical and Electronic Engineering and the Research Centre for Data Science & Artificial Intelligence, The Hong Kong Polytechnic University, Hong Kong (e-mail: kongaik.lee@polyu.edu.hk);", + "bbox": [ + 73, + 863, + 491, + 909 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Haizhou Li is also with the Shenzhen Research Institute of Big Data, School of Artificial Intelligence, School of Data Science, The Chinese University of Hong Kong, Shenzhen, China (email: haizhouli@cuhk.edu.cn).", + "bbox": [ + 73, + 909, + 491, + 945 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "and security of speech-driven systems has become a pressing concern [8]–[12]. Leveraging the rich representations of these foundation models could significantly improve the robustness and generalization of anti-spoofing systems [13]–[15].", + "bbox": [ + 501, + 234, + 919, + 295 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While speech foundation models offer exceptional representations, their high-dimensional feature outputs present significant challenges for downstream tasks. Downstream models used in tasks like speech anti-spoofing typically require lower-dimensional features [15]–[17]. To address this mismatch, a common approach is to introduce a dimensionality reduction (DR) layer, usually implemented as a fully connected (FC) layer for transforming high-dimensional features into lower-dimensional features. However, this conventional strategy presents notable drawbacks. Given that downstream classifiers are typically compact [15], [16], the DR layer alone often consumes a substantial portion of the parameters and computational resources within the entire back-end model. Moreover, directly projecting high-dimensional features in a one-shot manner through an FC layer leads to the loss of important information, reducing the effectiveness of speech foundation models. 
These issues highlight the need for a more efficient and effective solution to bridge the dimensionality gap and fully utilize speech foundation models in downstream tasks.", + "bbox": [ + 501, + 295, + 921, + 580 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these challenges, we propose Nested Res2Net (Nes2Net) to process high-dimensional features from speech foundation models, eliminating the need for a DR layer while preserving the richness of the original representations. By addressing key limitations of DR layers, such as excessive computational cost and information loss, Nes2Net offers a more efficient and effective solution. This design makes it particularly suitable for tasks requiring a balance of high performance and efficiency, such as speech anti-spoofing. The key contributions of this work can be summarized as follows:", + "bbox": [ + 501, + 580, + 921, + 733 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Novel Architecture: We introduce Nes2Net, a new approach that effectively addresses the limitations of DR layers. Nes2Net retains the expressive power of high-dimensional features while reducing model complexity.", + "- Enhanced Performance, Efficiency, and Generalization: Our method demonstrates a $22\%$ performance gain and an $87\%$ reduction in computational costs compared to the state-of-the-art baselines on the CtrSVDD dataset. Further experiments conducted on four additional datasets across various scenarios demonstrate strong generalization capability and consistently superior performance.", + "- Reproducibility: To facilitate further research and application, we make our scripts and pre-trained models publicly available." + ], + "bbox": [ + 519, + 733, + 921, + 945 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 910, + 30, + 919, + 40 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "II. RELATED WORK", + "text_level": 1, + "bbox": [ + 210, + 69, + 356, + 82 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A. Res2Net", + "text_level": 1, + "bbox": [ + 73, + 88, + 156, + 101 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Res2Net [18] is a well-known architecture designed to extract multi-scale features. Unlike ResNet [19], Res2Net uses hierarchical residual connections within a single block, allowing it to capture patterns across varying receptive fields simultaneously [18]. This design offers proven advantages in speech-related tasks, such as SV [20]–[22] and anti-spoofing [23]–[25], where capturing subtle variations and complex acoustic patterns is important. As shown in Fig. 1, Res2Net (highlighted using a light red block) can also serve as a classifier within a speech foundation model-based anti-spoofing system. Its ability to extract multi-scale features has led to superior performance over conventional models and motivates the design of Nested Res2Net in this work.", + "bbox": [ + 73, + 107, + 491, + 303 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "B. Hand-crafted Feature-based Speech Anti-Spoofing Models", + "text_level": 1, + "bbox": [ + 73, + 321, + 488, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Hand-crafted acoustic features (such as MFCC) are common choices for many earlier speech anti-spoofing systems. 
These systems have evolved to effectively detect speech deepfakes [26], [27]. For instance, the Channel-wise Gated Res2Net (CG-Res2Net) [23] introduces a gating mechanism within the Res2Net architecture, enabling dynamic selection of channel-wise features to enhance generalization to unseen attacks. A widely recognized model is AASIST [26], which employs spectro-temporal graph attention layers to capture both temporal and spectral artifacts, thereby achieving efficient and accurate detection. Given AASIST's SOTA performance and its wide adoption in recent anti-spoofing challenges [16], [28], we consider it as our main baseline for evaluation.", + "bbox": [ + 73, + 340, + 491, + 537 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "C. Speech Foundation Models", + "text_level": 1, + "bbox": [ + 73, + 556, + 284, + 570 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Speech foundation models are often referred to as Self-Supervised Learning (SSL) models due to their typical pretraining on large amounts of unlabeled speech data using self-supervised learning techniques. Examples include wav2vec 2.0 [1], HuBERT [2], and WavLM [3]. Unlike hand-crafted acoustic features, which are limited in their ability to adapt to diverse and complex conditions, SSL models learn rich and generalized speech representations that can be effectively adapted to various downstream applications. This allows them to achieve superior performance in speech-related tasks, including speech anti-spoofing.", + "bbox": [ + 73, + 574, + 491, + 741 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "D. Speech Foundation Model-based Anti-spoofing", + "text_level": 1, + "bbox": [ + 73, + 758, + 415, + 773 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As discussed in the previous subsection, speech foundation models can capture more informative representations than handcrafted or raw acoustic features [3]. This makes them highly effective for speech anti-spoofing, as they generalize well across datasets and are more robust to unseen attacks [15]. As a result, many recent anti-spoofing systems increasingly adopt these models as front-ends, feeding their features to the back-end classifiers and consistently outperforming traditional models [16], [29], [30].", + "bbox": [ + 73, + 777, + 490, + 912 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To connect these powerful front-end models to downstream classifiers, a feature aggregation layer is introduced, as shown", + "bbox": [ + 73, + 914, + 491, + 946 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/dabbcf97be8c75c0deb7beb748fdb20e0361404e5c527c4f5e1d38e00b935d08.jpg", + "table_caption": [ + "TABLE I CONTRIBUTION OF THE DR LAYER ON THE NUMBER OF PARAMETERS AND COMPUTATIONAL COST IN BACK-END MODELS. MMACS STANDS FOR MILLION MULTIPLY-ACCUMULATE OPERATIONS." + ], + "table_footnote": [], + "table_body": "
Back-end Model | Parameters (DR / Total / %) | MMACs (DR / Total / %)
ResNet [19] | 131k / 611k / 21% | 26.24 / 70.62 / 37%
Res2Net [18] | 131k / 452k / 29% | 26.24 / 64.93 / 40%
ECAPA [34] | 131k / 497k / 26% | 26.24 / 80.21 / 33%
AASIST [26] | 131k / 447k / 29% | 26.24 / 707.65 / 4%
", + "bbox": [ + 511, + 126, + 911, + 212 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "in Fig. 1. This layer combines features from different SSL layers using methods such as a simple weighted sum or attention-based methods like Squeeze-and-Excitation Aggregation (SEA) [16] and Attentive Merging (AttM) [31].", + "bbox": [ + 501, + 228, + 919, + 289 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Following the aggregation layer, the resulting features are passed to the back-end classifier, as shown in the green box of Fig. 1. Existing methods typically use a DR layer, which reduces the high-dimensional features of $N$ channels (commonly $N = 1024$ [1], [3], [32]) to a lower dimension $D$ (e.g., $D = 128$ [15], [16] or $D = 144$ [17], [33]) to match the classifier's input requirements. The classifier model then extracts features from the DR layer outputs and produces the final score. As illustrated in the red box of Fig. 1, commonly used classifier structures include traditional models such as ResNet [19], Res2Net [18], ECAPA-TDNN [34], and AASIST [26].", + "bbox": [ + 501, + 289, + 921, + 469 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The strong performance of these systems stems from their ability to capture rich speech representations, enabling more accurate distinction between real and spoofed speech. As a result, these systems have achieved SOTA results [33], [35], [36], especially in recent challenges like ASVspoof 5 [28], [37], CtrSVDD [16], [38], [39], and ADD [40]. However, the use of a DR layer introduces challenges that limit the backend's ability to fully leverage the rich representations from speech foundation models. In this work, we aim to better unlock the potential of foundation models for speech antispoofing. These issues will be discussed in the next subsection.", + "bbox": [ + 501, + 469, + 921, + 635 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "E. Limitation of Dimensionality Reduction Layer", + "text_level": 1, + "bbox": [ + 504, + 654, + 841, + 669 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Existing speech foundation model-based anti-spoofing systems excel in extracting rich, high-dimensional feature representations, which capture intricate patterns in speech. However, this high dimensionality poses a significant challenge for downstream tasks. Models in these tasks typically require lower-dimensional features [23], [26], [27], creating a mismatch between the output features of the foundation models and the requirements of downstream processing.", + "bbox": [ + 501, + 672, + 919, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A commonly used approach for dimensionality reduction is to employ a DR layer. However, this approach has several issues, including parameter overhead and potential information loss. As shown in Table I, our analysis of back-end models further emphasizes the inefficiency of this approach. 
We consider commonly used feature dimensions of $N = 1024$ from large models [1], [3], and a reduced dimension of $D = 128$ , widely adopted in SOTA back-end models [15], [16], [31].", + "bbox": [ + 501, + 794, + 921, + 914 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Across various back-end models, the DR layer, despite being just a single layer, consistently accounts for a substantial", + "bbox": [ + 503, + 914, + 921, + 946 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/60a1d06954731a1d8497f7ad39edfbb82ed60f30aa0d12cd56c49475a8a4119e.jpg", + "image_caption": [ + "Fig. 1. The block diagram of the speech foundation model-based speech anti-spoofing system, showcasing both the traditional back-end models and the proposed Nes2Net back-end. The traditional back-end models include a DR layer and a classifier, such as ResNet [19], Res2Net [18], ECAPA-TDNN [34], and AASIST [26]. In contrast, the proposed Nes2Net back-end model features a DR layer-free design. Additionally, an enhanced version of its nested layer, named Nes2Net-X, is introduced to further improve performance. Abbreviations used in the figure include: 'FC' (fully connected layer), 'Conv' (convolutional layer), 'WS' (weighted sum), 'SE' (squeeze-and-excitation module) [41], and 'Att. Stat. Pool.' (attentive statistics pooling) [42]." + ], + "image_footnote": [], + "bbox": [ + 78, + 70, + 918, + 327 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "share of parameters and computational cost, underscoring its resource-intensive nature. For instance, the DR layer accounts for $21\\%$ to $29\\%$ of the parameters across ResNet, Res2Net, ECAPA, and AASIST. In terms of computational cost, the DR layer generally contributes at least one-third of the total MACs. AASIST is the only exception, where the DR layer accounts for just $4\\%$ of the MACs, primarily because its overall MAC count is an order of magnitude higher than that of other models.", + "bbox": [ + 73, + 411, + 491, + 545 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This table highlights that a single DR layer significantly inflates the back-end model's size and resource demands. Furthermore, its direct projection design discards important high-dimensional features, limiting the overall potential of speech foundation models.", + "bbox": [ + 73, + 547, + 491, + 623 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "III. METHODOLOGY", + "text_level": 1, + "bbox": [ + 207, + 643, + 359, + 657 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A. Proposed Nested Res2Net (Nes2Net)", + "text_level": 1, + "bbox": [ + 73, + 667, + 346, + 681 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The design of Nes2Net is driven by two primary objectives: 1) effectively and efficiently utilizing the high-dimensional features from speech foundation models, and 2) enhancing multi-scale feature extraction to achieve robust generalization in speech anti-spoofing tasks. 
These objectives are realized through a novel nested architecture that simultaneously improves the efficiency, flexibility, and robustness of the model.", + "bbox": [ + 73, + 686, + 490, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Efficiency and Retention of Rich Feature Information: The analysis in Section II-E reveals the limitations of employing the DR layer. Building upon the observations, Nes2Net entirely removes the DR layer, directly processing high-dimensional features to retain their intrinsic richness and minimize unnecessary computational costs. By bypassing the DR layer, Nes2Net prevents the information bottleneck typically caused by early dimensionality reduction. This ensures the preservation of detailed representations essential for accurately distinguishing genuine speech from spoofed audio.", + "bbox": [ + 73, + 792, + 491, + 945 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Enhanced Multi-Scale Feature Interaction and Expressiveness: While the Res2Net architecture effectively extracts multi-scale features through hierarchical splits, it exhibits significant limitations when processing high-dimensional features directly, especially with large split scales $s$ . Specifically, Res2Net suffers from feature dilution [18], redundant transformations [43], and restricted interactions among channels. Excessive splitting fragments the features, weakening their expressiveness, and repetitive transformations increase computational redundancy, potentially causing overfitting. Moreover, closely related information can be distributed across non-adjacent subsets, limiting effective cross-channel interactions.", + "bbox": [ + 501, + 411, + 921, + 592 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To overcome these limitations, as illustrated in Fig. 1, we propose a novel Nested Res2Net (Nes2Net) architecture that introduces a hierarchical nesting structure. This additional degree of flexibility significantly enhances the model's representational capability. Each nested layer progressively refines features by building upon outputs from preceding layers and also incorporates efficient local cross-channel attention mechanisms [44], [45], strengthening interactions across channels. This holistic feature extraction approach enables Nes2Net to comprehensively capture intricate speech patterns. Moreover, the cumulative refinement effectively mitigates the issue of feature dilution, preserving rich and expressive multi-scale information. Benefiting from the structural advantages of the nesting strategy, the need for excessive fine-grained splits is reduced, effectively mitigating redundant transformations. This approach also minimizes unnecessary computations, resulting in a compact yet highly expressive model.", + "bbox": [ + 501, + 595, + 921, + 851 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Critically, overfitting is a well-known challenge in speech anti-spoofing tasks, often leading to degraded performance in cross-domain scenarios. Previous studies [23], [26], particularly with compact models like AASIST and Res2Net (both with fewer than 500k parameters), have shown that smaller models can help reduce overfitting. 
Our experiments with these", + "bbox": [ + 501, + 854, + 923, + 945 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 911, + 31, + 919, + 40 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "models confirm that simply increasing their size does not always lead to better performance and can, in fact, make overfitting worse. As a result, improving feature quality through smarter model structure design becomes more important than just scaling up the model. The nested architecture of Nes2Net provides clear benefits as it maintains computational efficiency while reducing the risk of overfitting.", + "bbox": [ + 73, + 69, + 491, + 174 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The Nes2Net consists of an outer layer and several identical nested layers, described as follows:", + "bbox": [ + 73, + 174, + 491, + 204 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "1) Outer Layer: The outer layer of Nes2Net adopts a structure similar to that of Res2Net. The high-dimensional features produced by a speech foundation model are uniformly split into $s_1$ feature map subsets, denoted by $x_i$ , where $i \\in \\{1, 2, \\dots, s_1\\}$ . Each feature subset $x_i$ has the same spatial size but contains only $\\frac{1}{s_1}$ of the channels of the input feature map. With the exception of $x_1$ , each $x_i$ is paired with a corresponding nested layer, denoted by $\\mathbf{K}_i(\\cdot)$ . The output of $\\mathbf{K}_i(\\cdot)$ , represented as $y_i$ , is computed as follows:", + "bbox": [ + 73, + 205, + 491, + 340 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ny _ {i} = \\left\\{ \\begin{array}{l l} x _ {i} & i = 1; \\\\ \\mathbf {K} _ {i} \\left(x _ {i}\\right) & i = 2; \\\\ \\mathbf {K} _ {i} \\left(x _ {i} + y _ {i - 1}\\right) & 2 < i \\leq s _ {1}. \\end{array} \\right. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 353, + 491, + 409 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $x_{i}$ is first added to the output of $\\mathbf{K}_{i - 1}(\\cdot)$ , and the resulting feature map is then fed into $\\mathbf{K}_i(\\cdot)$ for further processing. All $y_{i}$ features are concatenated along the channel dimension. Due to the combinatorial explosion effect [18], the output features encapsulate a fusion of receptive field characteristics across different scales and frame levels. These features are then pooled along the time axis to convert frame-level features into utterance-level representations, which are subsequently used to compute the final classification score.", + "bbox": [ + 73, + 416, + 491, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "It is worth noting that since the outer layer directly processes high-dimensional features from the speech foundation model, the original two convolutional layers (kernel size of 1) used before splitting and after concatenation in Res2Net are removed to improve efficiency.", + "bbox": [ + 73, + 553, + 491, + 628 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2) Nested Layer: The nested layer acts as the core module responsible for processing the outer layer's intermediate features, denoted by $x_{i}^{\\prime}$ , where $i \\in \\{2, \\ldots, s_1\\}$ . Based on Eq. 
1, $x_{i}^{\\prime}$ is defined as:", + "bbox": [ + 73, + 628, + 491, + 689 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nx _ {i} ^ {\\prime} = \\left\\{ \\begin{array}{l l} x _ {i} & i = 2; \\\\ x _ {i} + y _ {i - 1} & 2 < i \\leq s _ {1}. \\end{array} \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 700, + 488, + 741 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Each nested layer $\\mathbf{K}_i(\\cdot)$ is designed to extract multi-scale representations from its input while maintaining computational efficiency. As shown in Fig. 1, the structure of $\\mathbf{K}_i(\\cdot)$ follows a SE-Res2Net-like design, but its input is the feature subset $x_i'$ from the outer layer of Nes2Net. Specifically, each nested layer consists of the following components:", + "bbox": [ + 73, + 747, + 491, + 838 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Convolutional Layers: The input feature map is first processed by a convolutional layer with a kernel size of 1 to extract local features while preserving the spatial dimensions.", + "bbox": [ + 73, + 838, + 491, + 883 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multi-Scale Feature Extraction: To enable multi-scale processing, the input feature map $x_{i}^{\\prime}$ is equally split into $s_2$ subsets along the channel dimension, denoted by $x_{i,j}^{\\prime}$ , where $j \\in \\{1, 2, \\ldots, s_2\\}$ . Each subset undergoes separate", + "bbox": [ + 73, + 883, + 491, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "transformations through convolutional operations $\\mathbf{M}_j$ with varying receptive fields, yielding $y_{i,j}$ , formulated as:", + "bbox": [ + 503, + 69, + 919, + 99 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ny _ {i, j} = \\left\\{ \\begin{array}{l l} x _ {i, j} ^ {\\prime} & j = 1; \\\\ \\mathbf {M} _ {j} \\left(x _ {i, j} ^ {\\prime} + y _ {i, j - 1}\\right) & 1 < j \\leq s _ {2}. \\end{array} \\right. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 104, + 919, + 143 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "These transformed subsets are then concatenated to form the output $y_{i}$ of the nested layer.", + "bbox": [ + 503, + 148, + 919, + 178 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "SE Module: To further enhance the feature representations, a Squeeze-and-Excitation (SE) module is integrated into each nested layer. The SE module adaptively recalibrates channelwise features to emphasize informative features and suppress less relevant ones [41].", + "bbox": [ + 503, + 178, + 919, + 253 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Residual Connections: To enhance gradient flow and stabilize training, a residual connection is applied by adding the input of $x_{i}^{\\prime}$ to its output $y_{i}$ . This design preserves the original information while incorporating newly learned features.", + "bbox": [ + 503, + 253, + 919, + 313 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In summary, the nested layer is lightweight, highly efficient, and designed to improve robustness and generalization across diverse conditions.", + "bbox": [ + 503, + 313, + 919, + 357 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "B. Enhanced Nested Res2Net (Nes2Net-X)", + "text_level": 1, + "bbox": [ + 504, + 373, + 795, + 387 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Nes2Net efficiently addresses the high-dimensional feature issue. 
However, it relies on an additive combination method within the nested layer, which may limit the flexibility and effectiveness of feature extraction, as it implicitly assigns equal importance to all features. To further enhance the representational capacity of Nes2Net, we propose an improved variant named Nes2Net-X. It replaces the original addition operation in the nested layer with a concatenation followed by a learnable weighted summation. This design explicitly preserves feature subset individuality before fusion and employs learnable weights to adaptively combine these subsets. The Nes2Net-X consists of the following components:", + "bbox": [ + 501, + 391, + 921, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Feature Splitting and Processing: This component is the same as that in Nes2Net nested layer. The input feature $x_{i}^{\\prime}$ is equally split into $s_2$ subsets along the channel dimension, denoted by $x_{i,j}^{\\prime}$ , where $j \\in \\{1, 2, \\dots, s_2\\}$ . Each subset $x_{i,j}^{\\prime}$ undergoes a convolutional operation to extract feature representations.", + "bbox": [ + 503, + 573, + 921, + 662 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Feature Concatenation: The outputs of the convolutional layers are denoted as $z_{i,j}$ . In Nes2Net-X, instead of summing the processed features as in the Nes2Net, each current subset $x_{i,j}^{\\prime}$ is concatenated with the previous output $z_{i,j-1}$ along a newly introduced dimension before being processed.", + "bbox": [ + 503, + 662, + 921, + 738 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Weighted Sum: The additional dimension created during concatenation is merged back into the original feature space using a 'weighted sum' operation. This operation enables the model to dynamically assign importance to each subset, enhancing feature representation. For each subset, the 'weighted sum' is applied to the output feature $z_{i,j}$ of the convolutional layer. Let $w_{i,j}$ denote the learnable weights assigned to each concatenated feature. The output $y_{i,j}$ of the 'weighted sum' is computed as:", + "bbox": [ + 501, + 738, + 921, + 873 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ny _ {i, j} = \\sum_ {k = 1} ^ {s} w _ {i, j, k} \\cdot z _ {i, j, k} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 632, + 872, + 919, + 910 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $s$ denotes the number of subsets, $w_{i,j,k}$ represents the weight for the $k$ -th subset features $z_{i,j,k}$ .", + "bbox": [ + 504, + 914, + 921, + 946 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The weighted summation provides more flexible and effective feature integration, offering several advantages:", + "bbox": [ + 73, + 69, + 491, + 99 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Enhanced Feature Diversity: By concatenating features across subsets, the network captures a richer set of features, encompassing various aspects of the input data.", + "- Learnable Feature Fusion: The introduction of learnable weights $w$ enables the model to prioritize more informative features, effectively suppressing less relevant ones. 
This adaptive mechanism allows the network to focus on the most discriminative features for the task.", + "- Improved Gradient Flow: By combining concatenation with weighted summation, the model facilitates better gradient propagation during training. This helps address potential issues such as vanishing or exploding gradients, leading to more stable and efficient learning." + ], + "bbox": [ + 89, + 101, + 491, + 297 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "These modifications enable Nes2Net-X to retain the strengths of Nes2Net while introducing greater flexibility in feature fusion, ultimately improving performance.", + "bbox": [ + 73, + 299, + 491, + 345 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "IV. EXPERIMENTAL SETUPS", + "text_level": 1, + "bbox": [ + 181, + 358, + 385, + 371 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A. Datasets", + "text_level": 1, + "bbox": [ + 73, + 377, + 160, + 390 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/98b5b166adf5a4085d47308265f5b1d43548aef5667c9a311dea779157979ccf.jpg", + "table_caption": [ + "TABLE II A SUMMARY OF THE DATASETS USED IN OUR EXPERIMENTS." + ], + "table_footnote": [], + "table_body": "
Dataset | Spoofing Type | Train | Valid | Test
CtrSVDD w/o ACEsinger bona fide [46] | Singing Voice | 84,404 | 43,625 | 64,734
CtrSVDD w/ ACEsinger bona fide [46] | Singing Voice | 84,404 | 43,625 | 67,579
ASVspoof 2019 [47] | Speech | 25,380 | 24,844 | -
ASVspoof 2021 LA [48] | Speech | - | - | 181,566
ASVspoof 2021 DF [48] | Speech | - | - | 611,829
ASVspoof 5 [49] | Speech | 182,357 | 140,950 | 680,774
In-the-Wild [50] | Speech | - | - | 31,779
PartialSpoof [51] | Partial Spoof | 25,380 | 24,844 | 71,237
", + "bbox": [ + 75, + 441, + 488, + 568 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We use five datasets across various scenarios, including singing voice deepfake, fully spoofed speech, adversarial attacks, and partially spoofed speech, to evaluate the performance of the proposed model. Singing voice deepfake detection (SVDD) is a growing area of interest in the research community [46], [52], [53]. The CtrlSVDD dataset [46], [52] offers structured attack types and official evaluation protocols, making it suitable for systematic architecture exploration. As a newly collected resource, it captures recent spoofing techniques, providing a more challenging and relevant benchmark for modern anti-spoofing systems. We therefore adopt it as a representative example. Moreover, fully spoofed speech is the most studied category. In this work, we include two categories of datasets: (1) the ASVspoof series, which comprises ASVspoof 2019 [47], ASVspoof 2021 Logical Access (LA), ASVspoof 2021 Deepfake (DF) [48], and ASVspoof 5 [49]; and (2) the In-the-Wild dataset [50], which reflects real-world usage scenarios. Partially spoofed speech alters only part of an utterance to convey deceptive meaning. This emerging challenge has attracted growing attention. We use the PartialSpoof [51] dataset as a representative benchmark. Table II summarizes the datasets used in this study. Models are trained on the training set and validated on the validation set to select the best checkpoint for testing.", + "bbox": [ + 73, + 582, + 491, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For CtrlSVDD [46], we report results on two official test protocols, according to whether ACESinger bona fide samples are included. The 'A14' attack type of the CtrlSVDD dataset is excluded following the official guidelines [46]. ASVspoof 2019 [47] is used only for training and validation, while the In-the-Wild [50], ASVspoof 2021 LA and DF [48] datasets are used only for testing. For the recently released ASVspoof 5 dataset [49], we use its train, development, and evaluation partitions for model training, validation, and testing, respectively. For PartialSpoof [51], we follow the standard partitioning into train, development, and evaluation sets.", + "bbox": [ + 501, + 69, + 921, + 236 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "B. Training Strategies", + "text_level": 1, + "bbox": [ + 504, + 253, + 660, + 268 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Each experiment is run three times using different random seeds. We report both the result from the best-performing run and the average performance across all runs. The values of $s_1$ and $s_2$ are both set to 8 for Nes2Net and Nes2Net-X. The baseline systems for each dataset are built using SOTA models, and our proposed model adopts similar training strategies. The details are as follows:", + "bbox": [ + 501, + 271, + 921, + 377 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/efe1584a963bd86fedb0e112a862eb4e48cd69fc72a15bbe69361f939014ea25.jpg", + "image_caption": [ + "Fig. 2. The cyclic learning rate schedule using cosine annealing." + ], + "image_footnote": [], + "bbox": [ + 511, + 390, + 911, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "CtSVDD: For the models trained on the CtrSVDD dataset [46], [52], we follow the baseline system from $[16]^1$ . Following the setting in [16], we use a random seed of 42 to ensure reproducibility. 
Furthermore, due to the inherent stochasticity in deep learning, repeated runs are necessary to obtain reliable average results. We use the AdamW optimizer with batch size 34, an initial learning rate of $1 \times 10^{-6}$ , and weight decay of $1 \times 10^{-4}$ . The learning rate is scheduled using cyclic cosine annealing, decaying to a minimum of $1 \times 10^{-9}$ .", + "bbox": [ + 501, + 502, + 921, + 638 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Fig. 2, over 75 training epochs, we select checkpoints from the epoch with the minimum learning rate, as well as its preceding and following epochs, for validation. The best validation result is then used for testing. We use binary focal loss [54], a generalization of binary cross-entropy loss, with a focusing parameter $(\gamma)$ of 2 and a positive class weight $(\alpha)$ of 0.25. To standardize input length, each sample is randomly cropped or padded to 4 seconds during training. We adopt the Rawboost 'parallel: $(1)+(2)$ ' data augmentation strategy [55], as explored in [16]. WavLM is used as the front-end model for this dataset. The pre-trained model and implementation of WavLM are obtained from S3PRL2.", + "bbox": [ + 501, + 638, + 921, + 818 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ASVspoof 2019 & 2021: For the models trained on the ASVspoof 2019 [47] dataset, we follow the baseline system proposed in $[15]^3$ . Audio data are cropped or concatenated to create segments of approximately 4 seconds in duration (64,600 samples) for both training and testing. We use the", + "bbox": [ + 503, + 819, + 921, + 896 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ https://github.com/Anmol2059/SVDD2024", + "$^{2}$ https://github.com/s3prl/s3prl", + "$^{3}$ https://github.com/TakHemlata/SSL_Anti-spoofing" + ], + "bbox": [ + 517, + 905, + 795, + 944 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 911, + 31, + 919, + 40 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/bb3829ac42d9a686c979bc4e9bb61faab459c4bb9e22a76b5267ae278546f1d8.jpg", + "table_caption": [ + "TABLE III PERFORMANCE IN EER $(\%)$ ON THE CTRSVDD EVALUATION SET [46] WITH WAVLM [3] FRONT-END. RESULTS ARE SHOWN AS 'BEST (MEAN)' OVER 3 RUNS. PARAMS. AND MMACS REFER TO THE NUMBER OF PARAMETERS AND MILLION MULTIPLY-ACCUMULATE OPERATIONS, RESPECTIVELY. W/O AND W/ ACE B.F. REFER TO 'WITHOUT' AND 'WITH' ACESINGER BONA FIDE SAMPLES, RESPECTIVELY. ATTACK-SPECIFIC EERS ARE COMPUTED UNDER THE 'W/O ACE B.F.' CONDITION. BEST RESULTS ARE IN BOLD; SECOND-BEST ARE UNDERLINED. $\dagger$ DENOTES IMPLEMENTATION CONDUCTED BY US." + ], + "table_footnote": [ + "$\text{※}$ XWSB is an ensemble-like model that combines two SSL front-ends [39], while all other models in Table III are based on a single SSL front-end." + ], + "table_body": "
Back-end | Params. | MMACs | A9 | A10 | A11 | A12 | A13 | Pooled EER (w/o ACE. B.F.) | Pooled EER (w/ ACE. B.F.)
XWSB [39] ※ | - | - | - | - | - | - | - | - | 2.32
SLS [39] | - | - | - | - | - | - | - | - | 2.59
AASIST (C=32) [16] | 447k | 707.65 | - | - | - | - | - | - | 2.70
AASIST Light (C=24) † | 159k | 91.35 | 1.27 (1.37) | 0.87 (1.00) | 5.44 (5.86) | 4.84 (5.65) | 0.98 (1.05) | 3.95 (4.35) | 3.41 (3.77)
AASIST Standard (C=32) † | 447k | 707.65 | 1.18 (1.28) | 0.73 (0.86) | 3.63 (3.86) | 5.65 (5.77) | 0.88 (1.00) | 3.30 (3.36) | 2.79 (2.89)
AASIST Large (C=40) † | 662k | 1,091.28 | 1.32 (1.37) | 0.87 (0.97) | 3.70 (3.96) | 5.04 (5.63) | 0.96 (1.06) | 3.19 (3.36) | 2.71 (2.94)
AASIST XL (C=48) † | 835k | 1,555.56 | 1.23 (1.36) | 0.76 (0.92) | 3.40 (4.64) | 4.93 (5.55) | 0.89 (1.06) | 3.12 (3.62) | 2.76 (3.18)
AASIST XXL (C=56) † | 1,087k | 2,104.57 | 0.96 (1.20) | 0.66 (0.84) | 3.86 (4.15) | 4.83 (5.43) | 0.75 (0.95) | 3.05 (3.43) | 2.65 (2.95)
ResNet † | 611k | 70.62 | 1.18 (1.21) | 0.80 (0.93) | 3.97 (5.06) | 4.60 (4.86) | 0.96 (1.03) | 3.11 (3.61) | 2.74 (3.17)
Res2Net † | 452k | 64.93 | 1.26 (1.37) | 0.83 (0.86) | 3.59 (4.08) | 4.45 (4.80) | 1.08 (1.09) | 3.02 (3.24) | 2.61 (2.78)
ECAPA-TDNN (C=128) † | 497k | 80.21 | 1.18 (1.39) | 0.67 (0.85) | 4.47 (5.84) | 4.63 (4.96) | 0.87 (1.04) | 3.19 (3.74) | 2.79 (3.30)
Proposed Nes2Net | 511k | 58.11 | 1.23 (1.34) | 0.76 (0.81) | 2.40 (2.43) | 5.00 (5.24) | 0.96 (0.99) | 2.53 (2.55) | 2.22 (2.27)
Proposed Nes2Net-X | 511k | 91.35 | 1.21 (1.23) | 0.63 (0.76) | 2.09 (2.32) | 4.99 (5.24) | 0.83 (0.92) | 2.48 (2.51) | 2.20 (2.24)
", + "bbox": [ + 76, + 133, + 929, + 329 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Adam optimizer [56] with a weight decay of $1 \\times 10^{-4}$ . To reproduce the AASIST baseline [15], we reduce the original batch size from 14 to 8 due to GPU memory constraints, and halve the learning rate from $1 \\times 10^{-6}$ to $5 \\times 10^{-7}$ . For Nes2Net, benefiting from its lower GPU memory consumption, we use a batch size of 12 with a learning rate of $2.5 \\times 10^{-7}$ . The loss function used is weighted Cross Entropy. Following [15], we apply Rawboost augmentations [55], specifically 'series: $(1 + 2 + 3)$ ' (Algo4) and 'series: $(1 + 2)$ ' (Algo5), for AASIST baselines. For the proposed Nes2Net-X, only the former augmentation is applied. All models are trained for 100 epochs and the best checkpoint on the validation set is used for testing on the ASVspoof 2021 [48] and In-the-Wild [50] datasets.", + "bbox": [ + 76, + 358, + 488, + 553 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ASVspoof 5: Both our AASIST baseline and the proposed Nes2Net-X models are trained using settings similar to those used for AASIST in the ASVspoof 2019 corpus. However, several differences apply. The final learning rate is set to $1 \\times 10^{-7}$ , we apply data augmentation using MUSAN [57] and RIR [58], and training is stopped if there is no improvement on the development set for 5 consecutive epochs.", + "bbox": [ + 76, + 554, + 488, + 657 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "**PartialProof:** For models trained on the PartialSpoof [51], we follow the baseline systems described in [51], $[59]^4$ . Specifically, we use wav2vec 2.0 as the front-end, the MSE for P2SGrad [60] as the loss function, and Adam [56] as the optimizer. Following [59], the batch size is set to 2, and a learning rate of $2.5 \\times 10^{-6}$ is adopted for the baseline systems. For the proposed Nes2Net and Nes2Net-X, the learning rate is set to $1 \\times 10^{-5}$ . The pooling layer used for the proposed Nes2Net and Nes2Net-X is the Attentive Statistics Pooling [42], and the reduction ratio of SE module is set to 8. Training is terminated if no improvement is observed on the development set for 20 consecutive epochs. The epoch yielding the best performance on the development set is used for testing.", + "bbox": [ + 76, + 659, + 488, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "V. RESULTS AND ANALYSIS", + "text_level": 1, + "bbox": [ + 184, + 872, + 382, + 883 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "All Equal Error Rate (EER) results in this work are reported as 'best (mean)' over multiple runs. For cited results that (1)", + "bbox": [ + 76, + 890, + 488, + 919 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "are based on a single run, (2) report only the best result, or (3) lack sufficient details, only a single value is presented.", + "bbox": [ + 508, + 359, + 919, + 387 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A. Studies on the CtrSVDD dataset", + "text_level": 1, + "bbox": [ + 506, + 411, + 748, + 425 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct experiments on the CtrSVDD dataset [46], following two testing protocols: one including ACESinger bona fide samples and the other excluding them [38]. While results for both protocols are reported in Table III, our primary analysis focuses on the scenario 'without ACESinger bona fide (w/o ACE. B.F.)', as recommended by the dataset creators. 
Since AASIST $(\\mathrm{C} = 32)$ in our prior work [16], as well as SLS and XWSB [39], were evaluated during the CtrSVDD Challenge 2024, portions of their test sets differ from the current official protocol. As a result, the EER by attack type is not directly comparable. To ensure a fair comparison, we re-implemented the AASIST $(\\mathrm{C} = 32)$ system under the official protocol and used it as our baseline, referred to as AASIST Standard $(\\mathrm{C} = 32)$ in Table III, achieving an EER of $2.79\\%$ which is close to the originally reported $2.70\\%$ [16]. Under the 'w/o ACE B.F.' condition, the best run achieves an EER of $3.30\\%$ with an average of $3.36\\%$ across three runs. Further experiments show that scaling up the AASIST model does not improve mean EER, possibly due to parameter redundancy.", + "bbox": [ + 506, + 430, + 919, + 717 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We additionally evaluate several widely-used baseline systems, including ResNet [19], Res2Net [18], and ECAPATDNN [34]. ECAPA-TDNN and ResNet achieve EERs of $3.74\\%$ and $3.61\\%$ , respectively, which are slightly worse than that of AASIST. In contrast, Res2Net benefits from the advantages of multi-scale feature extraction, delivering the best average performance among the baseline systems with an EER of $3.24\\%$ . Our proposed Nes2Net outperforms all baseline systems, achieving a mean EER of $2.55\\%$ with the lowest computational cost. Furthermore, the enhanced version, Nes2Net-X, further improves the performance to $2.51\\%$ EER, marking the best single-model performance reported to date. Compared to Res2Net, ResNet, ECAPA-TDNN, and SOTA AASIST ( $C = 32$ ), Nes2Net-X achieves EER reductions of $23\\%$ , $30\\%$ , $33\\%$ , and $25\\%$ , respectively.", + "bbox": [ + 506, + 718, + 919, + 943 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 450, + 40 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 911, + 30, + 919, + 39 + ], + "page_idx": 6 + }, + { + "type": "footer", + "text": "4https://github.com/nii-yamagishilab/PartialSpoof", + "bbox": [ + 89, + 931, + 354, + 943 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/3cc1916acf788294e6c8ae9cfda94679fa734b07114b7b845c6ca02cdef7c997.jpg", + "table_caption": [ + "TABLE IV PERFORMANCE IN EER $(\\%)$ ON THE CTRSVDD EVALUATION SET [46], COMPARING THE PROPOSED NES2NET WITH RES2NET AND ITS VARIOUS VARIANTS. THE RESULTS ARE REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS, E.G., 3.02 (3.24) IN THE FIRST ROW, OR AS THE RESULT OF A SINGLE EXPERIMENT, E.G., 3.21 IN THE SECOND ROW. 'B' AND 'S' REPRESENT THE NUMBER OF BLOCKS AND SCALE OF RES2NET, RESPECTIVELY." + ], + "table_footnote": [], + "table_body": "
Back-end | Dimensionality Reduction Layer | Reduced Dimension D | Params. | MMACs | Pooled EER (w/o ACE. B.F.) | Pooled EER (w/ ACE. B.F.) | Remarks
Res2Net (b=4, s=4) | ✓ | 128 | 452k | 64.93 | 3.02 (3.24) | 2.61 (2.78) |
Res2Net (b=4, s=16) | ✓ | 128 | 427k | 59.95 | 3.21 | 2.80 | increase scale s
Res2Net (b=4, s=64) | ✓ | 128 | 419k | 58.28 | 3.15 | 2.74 |
Res2Net (b=4, s=128) | ✓ | 128 | 417k | 57.98 | 3.26 | 2.88 |
Res2Net (b=4, s=4) | ✓ | 64 | 180k | 23.25 | 4.32 | 3.76 | change D
Res2Net (b=4, s=4) | ✓ | 256 | 1,273k | 202.91 | 3.83 | 3.38 |
Res2Net-woDR (b=1, s=4) | × | - | 861k | 119.15 | 4.15 | 3.62 |
Res2Net-woDR (b=1, s=8) | × | - | 615k | 70.12 | 4.23 | 3.71 |
Res2Net-woDR (b=1, s=16) | × | - | 456k | 38.24 | 3.82 | 3.35 | remove dimensionality reduction layer and increase scale s
Res2Net-woDR (b=1, s=32) | × | - | 367k | 20.45 | 2.98 (3.45) | 2.56 (3.02) |
Res2Net-woDR (b=1, s=64) | × | - | 320k | 11.10 | 2.73 (2.97) | 2.42 (2.61) |
Res2Net-woDR (b=1, s=128) | × | - | 296k | 6.31 | 3.29 | 2.88 |
Res2Net-woDR (b=1, s=256) | × | - | 284k | 3.88 | 3.57 | 3.13 |
Res2Net-woDR (b=2, s=64) | × | - | 637k | 21.78 | 3.20 | 2.82 | increase depth
Res2Net-woDR (b=4, s=64) | × | - | 1,270k | 43.15 | 3.09 (3.18) | 2.73 (2.83) |
Proposed Nes2Net | × | - | 511k | 58.11 | 2.53 (2.55) | 2.22 (2.27) | proposed nested design
Proposed Nes2Net-X | × | - | 511k | 91.35 | 2.48 (2.51) | 2.20 (2.24) |
", + "bbox": [ + 76, + 127, + 916, + 371 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We also analyze performance across different synthetic attack types using the 'w/o ACE B.F.' protocol. Except for the 'A12' attack type [46], our model consistently achieves either the best or second-best performance, demonstrating strong generalization and robustness. Notably, the 'A12' attack type, based on Singing Voice Synthesis (SVS), proves particularly challenging, showing higher EER across all models and highlighting a potential area for future improvement.", + "bbox": [ + 73, + 386, + 491, + 507 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We observe that performance trends are consistent across both conditions, with and without ACESinger bona fide samples. Moreover, the EER is lower when ACESinger bona fide samples are included. This indicates that, even though ACESinger bona fide samples are considered out-of-domain, the trained models exhibit strong generalization capabilities and are able to classify these samples accurately.", + "bbox": [ + 73, + 507, + 491, + 613 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "B. The Roadmap of the Nes2Net", + "text_level": 1, + "bbox": [ + 73, + 625, + 302, + 640 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we introduce the roadmap from Res2Net to the proposed Nes2Net, with detailed results summarized in Table IV. All systems are implemented and evaluated under a unified framework for fair comparison. To aid interpretation, we visualize the number of parameters, MACs, and EER. These are represented in Fig. 3 by circle size, the horizontal axis, and the vertical axis, respectively. In the following, we provide detailed analyses:", + "bbox": [ + 73, + 643, + 490, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Investigating Res2Net: Among the baselines in Table III, the Res2Net-based back-end outperforms ResNet, AASIST, and ECAPA-TDNN on the CtrlSVDD dataset. Therefore, we select it as the reference baseline for further investigation. First, we experiment with adjusting the scale $s$ of Res2Net. We observe that as $s$ increases, the number of split groups increases linearly; however, the performance shows no significant improvement (depicted as the teal blue line in Fig. 3). This may be because adding too many split groups dilutes the feature representation, leading to redundancy.", + "bbox": [ + 73, + 763, + 491, + 914 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Next, we explore varying the dimensionality of the output features from the DR layer (referred to as Reduced Dimension", + "bbox": [ + 73, + 914, + 491, + 945 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b205df6253934c495cd6bf54f649c643eae158455b8a5a57b5838ccd4e16a70f.jpg", + "image_caption": [ + "Fig. 3. Visualization of Table III and IV, highlighting our exploration of Res2Net and the roadmap of architectural changes leading to Nes2Net." + ], + "image_footnote": [], + "bbox": [ + 506, + 385, + 921, + 752 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "$D$ , depicted as the steel gray line in Fig. 3). Reducing $D$ to 64 significantly lowers model size and MACs, compared to the default $D = 128$ , but leads to substantial performance degradation, increasing EER from $3.02\\%$ to $4.32\\%$ . Conversely, increasing $D$ to 256 results in a much larger model size and MACs but still leads to worse performance than $D = 128$ . 
This may be because a larger $D$ introduces over-parameterization and noise. This may explain why $D = 128$ is commonly adopted in SOTA models [15], [16].", + "bbox": [ + 501, + 792, + 921, + 929 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Removal of DR Layer: Foundation models often incorpo", + "bbox": [ + 519, + 929, + 921, + 945 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "rate a DR layer in their back-end architecture to compress high-dimensional features into lower-dimensional representations, facilitating downstream tasks. For instance, models like wav2vec 2.0-AASIST [15] utilize such a layer alongside task-specific classifiers (e.g., AASIST, ResNet). However, as discussed in Section II-E, this projection layer consumes a substantial portion of the back-end model's parameters and MACs while potentially causing information loss.", + "bbox": [ + 73, + 69, + 491, + 189 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To explore whether bypassing this layer preserves more task-relevant information, we propose a new back-end model: Res2Net without Dimensionality Reduction (Res2Net-woDR). By directly processing high-dimensional features, Res2Net-woDR simplifies the architecture and focuses on the raw features extracted by the speech foundation model. The naming emphasizes the absence of a DR layer, differentiating it from traditional approaches.", + "bbox": [ + 73, + 190, + 491, + 311 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We further evaluate the performance of Res2Net-woDR with different scales $s$ (depicted as the green line in Fig. 3). The best performance is observed with $s = 64$ , achieving a mean EER of $2.97\%$ , which surpasses the best Res2Net baseline. Increasing $s$ beyond this point leads to a decline in performance, likely due to the following factors:", + "bbox": [ + 73, + 311, + 491, + 404 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Feature Dilution. A large $s$ excessively fragments feature representations, weakening their expressiveness and resulting in diluted, less informative features [18].", + "- Redundant Transformations. An overly large $s$ introduces unnecessary feature transformations, leading to overfitting and reduced generalization [43].", + "- Restricted Feature Interaction. Since channels are unordered, distant groups may still contain correlated information. In this case, the additional convolutional layers introduced by splitting limit their interactions, weakening the model's ability to capture complex patterns." + ], + "bbox": [ + 89, + 409, + 491, + 575 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Based on the optimal $s$ , we increase the number of blocks $b$ to deepen the model (depicted as the light pink line in Fig. 3). However, no further performance improvement is observed. This could be attributed to the deeper architecture's limited ability to effectively utilize the additional parameters, resulting in diminishing performance gains. 
It may also increase the risk of overfitting.", + "bbox": [ + 73, + 579, + 491, + 685 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The Novel Nested Design: Prior experiments demonstrate that removing the DR layer enhances the performance of Res2Net. We believe that directly extracting information from high-dimensional speech foundation model features avoids the information loss introduced by DR. Our experiments with variations in scale, depth, and dimensionality show that a mean EER of $2.97\\%$ marks a performance bottleneck for this design.", + "bbox": [ + 73, + 686, + 491, + 792 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Compared to ResNet-woDR, the proposed Nes2Net adopts a novel nested design that enhances flexibility and significantly boosts the model's representational capacity. Processing larger feature subsets in the outer layer facilitates better interactions across channels within each nested layer. Furthermore, the integrated local cross-channel attention mechanism enhances feature selection while mitigating redundancy, addressing limitations in prior designs. This architectural refinement overcomes the performance limitations observed in the original Res2Net design. As a result, Nes2Net and its enhanced variant", + "bbox": [ + 73, + 792, + 491, + 944 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Nes2Net-X surpass the earlier performance bottleneck, achieving mean EERs of $2.55\\%$ and $2.51\\%$ , respectively.", + "bbox": [ + 503, + 68, + 921, + 99 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "C. Studies on the ASVspoof 2021 dataset", + "text_level": 1, + "bbox": [ + 504, + 117, + 790, + 132 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "TABLE V PERFORMANCE IN EER $(\\%)$ ON THE ASVspoof 2021 LA AND DF. THE RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN). CKPT AVG. REFERS TO THE NUMBER OF CHECKPOINTS AVERAGED. $\\ddagger$ DENOTES RE-IMPLEMENTATION CONDUCTED BY US. 'ALGO4' AND 'ALGO5' REPRESENT RAWBOOST SERIES AUGMENTATIONS: $(1 + 2 + 3)$ AND $(1 + 2)$ [55], RESPECTIVELY. PARAMETERS THAT ARE UNDERlined ARE CALCULATED BY US. $-$ REPRESENTS UNKNOWN. N/A INDICATES THAT THE SYSTEM DOES NOT USE THE AVERAGE CHECKPOINTS METHOD.", + "bbox": [ + 506, + 138, + 919, + 242 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/8e23480718b64a36e60a649ebac4e58f7fde3d839dc800ff33cb052003b55e25.jpg", + "table_caption": [], + "table_footnote": [ + "\\*: with extra data augmentation [71] $\\ddagger$ : ensemble of multiple models" + ], + "table_body": "
RemarkFront-endBack-end ModelBack-end ParametersCKPT Avg.ASVspoof 2021
LADF
2022wav2vec2.0 FIR-NB [61]--3.546.18
2022wav2vec2.0 FIR-WB [61]--7.084.98
2022wav2vec2.0 LGF [62]--9.664.75
2023wav2vec2.0 Conformer (fix) [63]2,506k551.382.27
2023wav2vec2.0 Conformer (var) [63]2,506k50.877.36
2024wav2vec2.0 Ensembling [64] ‡--2.32 (4.48)5.60 (8.74)
2024WavLMASP+MLP [65]1,051k-3.314.47
2024wav2vec2.0 SLIM [14]---(4.4)
2024WavLMAttM-LSTM [31]936k6N/A3.503.19
2024wav2vec2.0 FTDKD [66]--2.962.82
2024wav2vec2.0 AASIST2 [67]--1.612.77
2024wav2vec2.0 MFA [68]--5.082.56
2024wav2vec2.0 MoE [69]--2.962.54
2024wav2vec2.0 OCKD [70]--0.902.27
2024wav2vec2.0 TCM [33]2,383k751.032.06
2024wav2vec2.0 SLS [35]23,399k8-2.87 (3.88)1.92 (2.09)
2025wav2vec2.0 LSR+LSA [71]--1.192.43
2025wav2vec2.0 LSR+LSA [71] ※--1.051.86
2025wav2vec2.0 WaveSpec [72]---1.90
2025wav2vec2.0 Mamba [17]1,937k950.931.88
2025wav2vec2.0 SSL-EOW-S. [73] ‡---1.75 (2.91)
2025wav2vec2.0 Cal. Ensemble [73] ‡---(2.03)
2022wav2vec2.0 AASIST [15]447k10N/A0.82 (1.00)2.85 (3.69)
wav2vec2.0 AASIST (algo4)447kN/A1.13 (1.36)3.37 (4.09)
wav2vec2.0 AASIST (algo5)447kN/A0.93 (1.40)3.56 (5.07)
Ourswav2vec2.0 Nes2Net511kN/A1.61 (1.90)1.89 (2.12)
wav2vec2.0 Nes2Net-X511kN/A1.73 (1.95)1.65 (1.91)
wav2vec2.0 Nes2Net-X511k31.66 (1.87)1.54 (1.98)
wav2vec2.0 Nes2Net-X511k51.88 (2.00)1.49 (1.78)
", + "bbox": [ + 504, + 246, + 921, + 589 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The ASVspoof series datasets are widely used as benchmarks for advancing research in detecting spoofed speech [47], [48]. Following the standard protocol, we train models on ASVspoof 2019 [47] and evaluate them on ASVspoof 2021 Logical Access (LA) and Deepfake (DF) tasks [48]. The LA task focuses on detecting synthetic and voice-converted speech transmitted over telephony systems, introducing challenges related to channel effects and transmission variability. In contrast, the DF task targets detecting manipulated, compressed speech data commonly found on online platforms. This reflects real-world scenarios where deepfake audio circulates, making the DF task a valuable benchmark for evaluating deepfake detection systems.", + "bbox": [ + 501, + 616, + 921, + 811 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The results in Table V show that for the LA track, our Nes2Net achieves a mean EER of $1.90\\%$ , comparable to SOTA systems. For the DF track, which more closely reflects", + "bbox": [ + 503, + 811, + 921, + 858 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "$^{5}$ https://github.com/ErosRos/conformer-based-classifier-for-anti-spoofing", + "$^{6}$ https://github.com/pandartialdTJU/AttM_INTERSPEECH24", + "7https://github.com/ductuantruong/tcm_add", + "$^{8}$ https://github.com/QiShanZhang/SLSforASVspoof-2021-DF", + "9https://github.com/swagshaw/XLSR-Mamba", + "$^{10}$ https://github.com/TakHemlata/SSL_Anti-spoofing" + ], + "bbox": [ + 514, + 867, + 910, + 944 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/aaf4389be4b7945f02c8bda6ea039fe9c5e53dd5e8bc867b886ce5c647d07ecd.jpg", + "table_caption": [ + "TABLE VI PERFORMANCE IN EER $(\\%)$ FOR DIFFERENT TYPES OF VOCODERS AND COMPRESSION CONDITIONS ON THE ASVSPOOF 2021 DF TEST SET. THE FIVE EER VALUES FOR EACH SUB-ITEM, FROM LEFT TO RIGHT, CORRESPOND TO NES2NET-X, MAMBA [17], SLS [35], TCM [33], AND AASIST [15].THE BEST PERFORMANCE IS REPORTED IN BOLD FONTS, AND THE SECOND-BEST IS UNDERLINED." + ], + "table_footnote": [], + "table_body": "
Traditional VocoderWav ConcatenationNeural Autoreg.Neural Non-autoreg.UnknownPooled EER
C1 -0.36/0.78/1.21/0.95/1.220.76/0.76/0.80/0.76/2.282.70/3.88/3.12/3.89/3.450.52/0.87/0.68/0.95/1.561.64/1.63/1.23/1.73/1.991.47/1.89/1.72/2.23/2.34
C2 Low mp31.48/0.94/1.94/1.67/2.722.96/2.20/2.16/2.56/5.842.89/3.23/2.71/3.59/5.961.23/0.86/0.78/1.32/3.332.54/1.69/1.65/1.93/4.301.75/1.84/2.02/2.11/4.30
C3 High mp30.44/0.88/1.39/0.96/1.831.13/1.49/1.17/1.45/3.352.47/3.35/2.91/3.70/3.790.44/0.87/0.69/0.88/2.022.29/1.85/1.34/1.67/2.651.32/1.85/1.59/1.95/2.64
C4 Low m4a0.44/0.95/1.48/1.22/1.571.15/0.85/1.24/1.67/2.092.79/3.39/2.79/3.40/3.750.54/0.96/0.70/1.22/1.651.32/1.22/1.14/1.41/2.101.40/1.92/1.74/2.01/2.37
C5 High m4a0.45/0.80/1.34/0.98/1.160.62/0.76/0.71/0.76/2.102.77/3.48/2.96/3.73/3.390.56/0.90/0.64/1.07/1.341.88/1.70/1.34/1.43/1.871.59/2.05/1.79/1.96/2.14
C6 Low ogg0.69/1.13/2.14/1.44/2.350.80/0.97/0.91/0.91/2.231.92/2.80/2.44/2.79/3.670.48/0.78/0.61/0.84/1.621.05/1.14/1.00/1.01/2.231.09/1.61/1.88/1.87/2.58
C7 High ogg0.70/1.13/1.52/1.35/1.570.62/0.80/0.71/0.80/1.502.05/2.84/2.26/2.66/2.920.43/0.65/0.52/0.74/1.001.34/1.05/0.96/0.96/1.271.35/1.61/1.57/1.74/1.92
C8 mp3→m4a0.95/1.26/2.28/1.74/3.011.52/0.97/1.08/1.08/2.962.22/3.01/2.31/2.96/4.490.61/0.57/0.65/0.95/2.051.61/1.18/1.09/1.18/2.661.48/1.65/1.92/1.97/3.31
C9 ogg→m4a0.70/1.26/2.15/1.49/2.280.88/0.97/0.99/0.88/2.521.92/3.01/2.57/2.88/3.760.52/0.70/0.65/0.78/1.570.96/1.09/1.09/1.05/2.141.13/1.79/2.04/1.88/2.75
Pooled EER0.72/1.14/1.88/1.40/2.151.10/1.05/1.07/1.14/2.852.70/3.32/2.86/3.40/4.050.63/0.80/0.69/0.94/1.841.86/1.43/1.23/1.38/2.451.49/1.88/1.92/2.06/2.85
Traditional VocoderWav ConcatenationNeural AutoregressionNeural Non-autoregressionUnknownPooled EER
C1
C2
C3
C4
C5
C6
C7
C8
C9
Pooled EER
", + "bbox": [ + 76, + 122, + 919, + 551 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Fig. 4. Visualization of the EER $(\\%)$ across various vocoders and compression conditions on the ASVspoof 2021 DF test set. Each EER value is shown as a colored circle, where the size indicates the EER value, and the color represents the performance ranking among the five models: blue (best) to light red (worst). The five EER values for each sub-item, from left to right, correspond to the proposed Nes2Net-X, Mamba [17], SLS [35], TCM [33], and AASIST [15].", + "bbox": [ + 73, + 553, + 923, + 590 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "real-world scenarios as discussed earlier, the baseline system AASIST [15] achieves its best EER of $2.85\\%$ and a mean EER of $3.69\\%$ , remaining competitive with current SOTA systems. The SLS [35] and TCM [33] models achieve EERs close to $2\\%$ , demonstrating strong performance at the SOTA level. The Mamba-based [17] model further improves results, reducing the EER to $1.88\\%$ . Notably, our proposed Nes2Net attains its best EER of $1.89\\%$ and a mean EER of $2.12\\%$ EER, comparable to the performance of current SOTA systems. The enhanced variant, Nes2Net-X achieves the best performance among all compared systems, with its best EER of $1.65\\%$ and a mean EER of $1.91\\%$ .", + "bbox": [ + 73, + 606, + 491, + 787 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Inspired by prior works [17], [33], we average the weights of several top-performing checkpoints on the validation set to obtain an improved model. This approach further improves the performance of the DF task to a best EER of $1.49\\%$ and a mean EER of $1.78\\%$ , which, to the best of our knowledge, is the best performance reported to date. Furthermore, compared to Mamba [17], our model achieves this performance with approximately $74\\%$ fewer parameters, demonstrating superior efficiency.", + "bbox": [ + 73, + 789, + 490, + 926 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The analysis above summarizes overall performance on the", + "bbox": [ + 89, + 929, + 491, + 945 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "DF test set. The DF dataset also provides detailed labels for vocoder types and compression conditions, enabling more fine-grained analysis. To further evaluate performance, we compare the SOTA models Mamba, SLS, TCM, and AASIST with our proposed Nes2Net-X across these sub-tracks. The results are presented in Table VI. To improve readability and make the extensive numerical data easier to interpret, we also visualize the table's results in Fig. 4.", + "bbox": [ + 501, + 604, + 921, + 727 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "For traditional vocoders, all models perform well, with most EERs below $2\\%$ . Notably, our proposed Nes2Net-X achieves exceptional results, consistently yielding EERs under $1\\%$ across all conditions except C2. This demonstrates the strong stability of Nes2Net-X when handling unseen and relatively simple scenarios. In contrast, for neural autoregressive vocoders, all models experience a noticeable drop in performance, with EER reaching up to $5.96\\%$ . This indicates the greater challenge posed by the sequential and dynamic nature of autoregressive vocoders, which introduce higher variability in synthesis. 
Nevertheless, Nes2Net-X maintains a clear advantage over the competing models, demonstrating its robustness in handling these complex synthesis conditions.", + "bbox": [ + 501, + 729, + 921, + 926 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "From the perspective of compression conditions, the differ", + "bbox": [ + 519, + 929, + 921, + 945 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 906, + 30, + 921, + 40 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ences in model performance are less pronounced compared to the variations observed across vocoder types. Nes2Net-X consistently achieves the lowest EERs across all compression conditions, regardless of the level of distortion introduced by compression. This consistency highlights the model's strong generalization ability across different levels of compressions.", + "bbox": [ + 73, + 69, + 491, + 159 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Overall, these findings demonstrate that Nes2Net-X is not only highly effective across diverse vocoder types, but also maintains superior performance under varying compression conditions. This robustness underscores the model's capability to handle both compression diversity and complex synthesis challenges, making it a reliable solution for deepfake audio detection across a wide range of scenarios.", + "bbox": [ + 73, + 159, + 491, + 263 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "D. The results on the In-the-Wild dataset", + "text_level": 1, + "bbox": [ + 73, + 277, + 359, + 291 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "TABLE VII PERFORMANCE IN EER $(\\%)$ ON THE IN-THE-WILD [50] DATASET. OUR RESULT IS REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS.", + "bbox": [ + 73, + 294, + 490, + 330 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/79157a288ac5f97faf334aba645a2a71ff54ca0a6409da7b5db961540f267480.jpg", + "table_caption": [], + "table_footnote": [ + "$\\text{※}$ with extra data augmentation [71]" + ], + "table_body": "
Front-endYearBack-endEER
wav2vec 2.02022Rawnet&ASSIST (reported by [35])10.46
2024SLIM [14]- (12.5)
2024MoE [69]9.17
2024Conformer [63]8.42
2024TCM [33]7.79
2024OCKD [70]7.68
2024SLS [35]7.46 (8.87)
2024Pascu et al. [74]- (7.2)
2025Mamba [17]6.71
2025WaveSpec [72]6.58
2025LSR+LSA [71]5.92
2025LSR+LSA [71]※5.54
-Proposed Nes2Net5.80 (7.06)
-Proposed Nes2Net-X5.52 (6.60)
", + "bbox": [ + 78, + 337, + 490, + 523 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The In-the-Wild dataset [50] is a collection of deepfake videos sourced from the internet. Unlike controlled datasets, it captures the diverse and unpredictable nature of real-world scenarios. This diversity is essential for developing and evaluating deepfake detection models, as it challenges them to generalize effectively across a wide range of conditions.", + "bbox": [ + 73, + 537, + 490, + 627 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In addition, unlike many other datasets that rely on self-generated fake audio, this dataset is collected from publicly available video and audio files explicitly labeled as audio deepfakes [50]. To account for the potential presence of partial spoofing, we evaluate our proposed Nes2Net and Nes2Net-X using the entire duration of each test sample instead of restricting it to the first 4 seconds, as the latter approach risks missing partially spoofed segments.", + "bbox": [ + 73, + 628, + 491, + 748 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The testing results, alongside SOTA models, are reported in Table VII. We find that the overall performance trends are consistent with those seen on the ASVspoof 2021 DF dataset. However, EERs on the In-the-Wild dataset are generally higher than those on the DF dataset, reflecting greater complexity and variability in real-world scenarios. Notably, the proposed Nes2Net-X outperforms all SOTA models, achieving the lowest EER of $5.52\\%$ and a mean EER of $6.60\\%$ on this challenging dataset.", + "bbox": [ + 73, + 748, + 491, + 883 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "E. The results on the ASVspoof 5 dataset", + "text_level": 1, + "bbox": [ + 73, + 895, + 362, + 911 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The ASVspoof 5 dataset represents the most recent edition in the ASVspoof series. Unlike earlier versions, it introduces", + "bbox": [ + 73, + 914, + 491, + 944 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/659129421e45a9fe814172a8f9a34bba84b17527efa124efa6a95150223c258d.jpg", + "table_caption": [ + "TABLE VIII A COMPARISON BETWEEN THE PROPOSED NES2NET AND THE AASIST BASELINE SYSTEM ON THE ASVSPOOF 5 DATASET [49]. 'PARAMS.' AND 'MMACs' REFER TO THE NUMBER OF PARAMETERS AND THE NUMBER OF MILLION MULTIPLY-ACCUMULATE OPERATIONS, RESPECTIVELY. 'AVG.' INDICATES THE AVERAGE RELATIVE PERFORMANCE IMPROVEMENT ACROSS ALL THREE EVALUATION METRICS." + ], + "table_footnote": [], + "table_body": "
Back-endPerformance
ModelParams.↓MMACs↓CLLR↓minDCF↓EER↓Avg.
AASIST447k707.650.95870.16456.08Benchmark
Nes2Net511k58.110.79120.15686.137.1%
Nes2Net-X511k91.350.73440.15355.9210.9%
", + "bbox": [ + 509, + 146, + 916, + 234 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "adversarial attacks and is crowdsourced under various acoustic conditions [49]. As it is newly released, there are currently no existing systems available for a fair comparison. Therefore, we re-implement the AASIST system as a baseline and compare it with our proposed Nes2Net and Nes2Net-X model. Following the ASVspoof 5 challenge guidelines [49], we use WavLM [3] as the front-end. Based the evaluation protocol in [37], we assess performance using three metrics: Cost of Log-Likelihood Ratio (CLLR), minimum Detection Cost Function (minDCF), and EER, and present the results in Table VIII. We observe that the Nes2Net and Nes2Net-X backend models result in only a slight increase in the number of parameters compared to AASIST, while significantly reducing MMMs. Moreover, across all three evaluation metrics, the Nes2Net and Nes2Net-X back-ends improve performance by $7.1\\%$ and $10.9\\%$ , receptively.", + "bbox": [ + 501, + 252, + 921, + 493 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "F. The results on the PartialSpoof dataset", + "text_level": 1, + "bbox": [ + 504, + 517, + 792, + 534 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/ff815d3d676fe98054c7f2384cec4ecceab1946ec4936157be8fb0c8b58b53f0.jpg", + "table_caption": [ + "TABLE IX PERFORMANCE IN EER $(\\%)$ ON THE PARTIALSPOOF [51] DATASET. THE RESULTS ARE REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS. $\\dagger$ INDICATES RESULTS OBTAINED FROM OUR IMPLEMENTATION." + ], + "table_footnote": [], + "table_body": "
Front-endYearBack-endPartialSpoof [51]
DevEval
wav2vec 2.02024gMLP [51]0.350.64
-gMLP†0.39 (0.43)0.72 (0.80)
20241D Res2Net [59]0.350.73
-1D Res2Net†0.35 (0.38)0.73 (0.79)
-SE ResNet†0.31 (0.50)0.77 (0.78)
-Nes2Net0.24 (0.36)0.53 (0.68)
-Nes2Net-X0.20 (0.33)0.57 (0.64)
", + "bbox": [ + 516, + 599, + 911, + 727 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Partially manipulating a sentence can significantly alter its intended meaning [59]. When such manipulations occur in small regions, existing models trained on fully spoofed speech and relying on pooling functions struggle to detect these subtle changes. Consequently, there is growing interest in the detection of partially spoofed speech [51], [59], [75].", + "bbox": [ + 501, + 747, + 919, + 838 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "To evaluate the performance of our proposed model across different spoofing tasks, we conduct experiments on the PartialSpoof dataset [51]. The results are presented in Table IX. First, we reproduce the performance of two SOTA models, achieving results comparable to those reported in their original papers [51], [59]. Additionally, we evaluate SE ResNet, which demonstrated performance similar to the other baselines. In", + "bbox": [ + 501, + 839, + 921, + 944 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/bd43c91baeed1799a4c358e11c48f71336db4a8f12d9b56d330407e1dc303c30.jpg", + "table_caption": [ + "TABLE X THE PERFORMANCE IN EER $(\\%)$ ON THE ASVspoof 2021 LA, DF [48], AND IN-THE-WILD [50] DATASETS. THE RESULTS ARE REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS. W/ AUG.' AND W/O AUG.' INDICATE WHETHER EVALUATION WITH AUGMENTATIONS ON THE VALIDATION SET IS USED TO SELECT THE BEST CHECKPOINT FOR TESTING. CKPT Avg. REFERS TO THE NUMBER OF CHECKPOINTS AVERAGED." + ], + "table_footnote": [], + "table_body": "
Back-endTrain SetCKPT Avg.w/ Aug.w/o Aug.
21LA [48]21DF [48]In-the-Wild [50]21LA [48]21DF [48]In-the-Wild [50]
Nes2Net-XASVspoof 19 [47]N/A1.63 (1.79)1.84 (2.03)5.56 (6.61)1.73 (1.95)1.65 (1.91)5.73 (6.83)
31.70 (1.80)1.88 (1.98)5.15 (6.31)1.66 (1.87)1.54 (1.98)5.59 (6.90)
51.67 (1.78)1.80 (1.91)5.28 (6.31)1.88 (2.00)1.49 (1.78)5.52 (6.60)
", + "bbox": [ + 104, + 121, + 888, + 204 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "contrast, our proposed Nes2Net and Nes2Net-X outperform all three baselines.", + "bbox": [ + 73, + 220, + 491, + 250 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "G. Empirical Runtime and Memory Analysis", + "text_level": 1, + "bbox": [ + 73, + 273, + 382, + 287 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Number of parameters and MMACs are widely adopted metrics for evaluating model efficiency. These platform-independent measures offer consistent and fair comparisons across different hardware. However, to better reflect the real-world deployment costs of back-end architectures, we additionally benchmark their training time, inference time, and peak GPU memory usage, as summarized in Table XI.", + "bbox": [ + 73, + 292, + 490, + 398 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/9d42cb0231914a7b71d6b7e6fa59b6b9bb1fdc4444517d6b37eb83903910150a.jpg", + "table_caption": [ + "TABLE XI TRAINING AND INFERENCE EFFICIENCY COMPARISON ACROSS BACK-END MODELS. THE TABLE REPORTS THE AVERAGE (AVG.) TRAINING AND INFERENCE TIME PER BATCH IN MILLSECONDS (MS/BATCH), AS WELL AS PEAK GPU MEMORY USAGE IN MEGABYTES (MB)." + ], + "table_footnote": [], + "table_body": "
Back-endAvg. Time (ms/batch)↓Peak GPU Memory↓ (MB)
TrainingInference
AASIST Light (C=24)27.07.81,327
AASIST Standard(C=32)53.818.73,454
AASIST Large(C=40)79.228.14,273
AASIST XL(C=48)86.130.75,087
AASIST XXL(C=56)100.937.45,905
ResNet7.82.6691
Res2Net15.63.5721
ECAPA-TDNN (C=128)9.43.1698
Proposed Nes2Net20.24.91,312
Proposed Nes2Net-X29.19.22,231
", + "bbox": [ + 76, + 481, + 500, + 643 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "All back-end models are evaluated under identical conditions: input features of 400 frames with 1024 dimensions, a batch size of 64, and execution on a dedicated NVIDIA H20 GPU. The first 10 batches are used for warm-up and excluded from the measurement, and the inference and training times are averaged over the subsequent 200 batches. Training time includes the forward, backward, and optimizer update steps.", + "bbox": [ + 73, + 657, + 490, + 762 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The results show that AASIST models exhibit rapidly increasing runtime and memory consumption as the channel dimension $C$ grows. In contrast, our proposed Nes2Net achieves notably lower latency and memory usage. Nes2Net-X further improves performance in some settings by preserving more high-dimensional information, albeit at the cost of higher resource consumption.", + "bbox": [ + 73, + 762, + 491, + 868 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Conventional models such as ResNet, Res2Net, and ECAPA-TDNN offer faster runtime and smaller memory footprints than our proposed method, but fall short in detection accuracy as shown in earlier experiments. Therefore, when selecting a back-end architecture, we believe both Nes2Net", + "bbox": [ + 73, + 869, + 491, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "and Nes2Net-X offer flexible options: the former prioritizes efficiency, while the latter favors accuracy when computational resources permit. This underscores the importance of balancing performance and efficiency in real-world applications.", + "bbox": [ + 501, + 220, + 919, + 281 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "H. Should We Use Augmentation During Validation?", + "text_level": 1, + "bbox": [ + 503, + 294, + 864, + 309 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In all previous experiments, the datasets are split into three non-overlapping subsets: training, validation (or development), and test sets. The validation set is used to select the best-performing checkpoints for final evaluation on the test set. The training set typically applies data augmentation to enhance model performance and generalization. However, the use of augmentation during validation remains inconsistent across prior studies. For instance, wav2vec 2.0-AASIST [15] applies the same augmentation strategy to both training and validation sets. In contrast, WavLM-AASIST [16] does not use augmentation on the validation set, aligning with common practices in speaker verification research [34], [76], [77].", + "bbox": [ + 501, + 311, + 919, + 493 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this section, we compare these two approaches and report the results in Table X. We observe that applying the same augmentation to the validation set as in the training set leads to worse performance on ASVspoof 2021 DF [48], but better results on In-the-Wild [50]. When no augmentation is applied to the validation set, the opposite trend is observed.", + "bbox": [ + 501, + 494, + 919, + 583 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "From the outcome of the above study, we believe that in cases where robustness to certain variations (e.g., noise, compression, or distortions) is important, applying augmentation during validation provides insights into how well the model handles such conditions. 
As a result, the selected checkpoints from this approach may generalize better to these variations. Further investigation into this topic may yield deeper insights for future work.", + "bbox": [ + 501, + 583, + 919, + 703 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "VI. CONCLUSION", + "text_level": 1, + "bbox": [ + 645, + 715, + 779, + 728 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this work, we propose Nested Res2Net (Nes2Net) and its enhanced variant, Nes2Net-X, as lightweight and dimensionality reduction (DR) layer-free back-end architectures designed for speech anti-spoofing in the era of foundation models. Unlike conventional approaches that rely on a DR layer to bridge the mismatch between high-dimensional features and downstream classifiers, our proposed architectures directly process these rich representations. This not only eliminates the computational and parameter overhead introduced by DR layers but also avoids information loss, enhancing overall system efficiency and robustness.", + "bbox": [ + 501, + 733, + 919, + 898 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Nes2Net incorporates a novel nested multi-scale design that enables more effective feature extraction and deeper cross-channel interactions without increasing model complexity.", + "bbox": [ + 501, + 898, + 921, + 946 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The improved Nes2Net-X further strengthens representation learning by introducing learnable weighted feature fusion, offering adaptive control over the feature aggregation process.", + "bbox": [ + 73, + 69, + 491, + 114 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We conduct extensive evaluations across five representative datasets: CtrSVDD, ASVspoof 2021, ASVspoof 5, Partial-Spoof, and In-the-Wild, covering a wide range of singing voice deepfakes, fully spoofed speech, adversarial attacks, real-world deepfakes, and partially spoofed speech. Across all scenarios, our models achieve SOTA performance, demonstrating superior generalization, compactness, and resilience under unseen and challenging conditions.", + "bbox": [ + 73, + 114, + 491, + 234 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In summary, Nes2Net and Nes2Net-X offer a general-purpose, resource-efficient back-end for foundation model-based speech anti-spoofing, providing a practical yet powerful alternative to DR-dependent designs. To facilitate future research and applications, we make all source code and pretrained models publicly available.", + "bbox": [ + 73, + 234, + 491, + 325 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 235, + 332, + 331, + 345 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] A. Baevski, Y. Zhou, A. Mohamed, and M. Auli, \"wav2vec 2.0: A framework for self-supervised learning of speech representations,\" in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 33, 2020, pp. 12449-12460.", + "[2] W.-N. Hsu, B. Bolte, Y.-H. H. Tsai, K. Lakhotia, R. Salakhutdinov, and A. Mohamed, \"HuBERT: Self-supervised speech representation learning by masked prediction of hidden units,\" IEEE/ACM Trans. Audio, Speech, Lang. 
Process., vol. 29, pp. 3451-3460, 2021.", + "[3] S. Chen, C. Wang, Z. Chen, Y. Wu, S. Liu, Z. Chen, J. Li, N. Kanda, T. Yoshioka, X. Xiao, J. Wu, L. Zhou, S. Ren, Y. Qian, Y. Qian, J. Wu, M. Zeng, X. Yu, and F. Wei, \"WavLM: Large-scale self-supervised pre-training for full stack speech processing,\" IEEE J. Sel. Top. Signal Process., vol. 16, no. 6, pp. 1505-1518, 2022.", + "[4] A. T. Liu, S.-W. Li, and H.-y. Lee, “TERA: Self-supervised learning of transformer encoder representation for speech,” IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 29, pp. 2351-2366, 2021.", + "[5] J. Zhao and W.-Q. Zhang, \"Improving automatic speech recognition performance for low-resource languages with self-supervised models,\" IEEE J. Sel. Top. Signal Process., vol. 16, no. 6, pp. 1227-1241, 2022.", + "[6] J. weon Jung, W. Zhang, J. Shi, Z. Aldeneh, T. Higuchi, A. Gichamba, B.-J. Theobald, A. Hussen Abdelaziz, and S. Watanabe, \"ESPnet-SPK: full pipeline speaker embedding toolkit with reproducible recipes, self-supervised front-ends, and off-the-shelf models,\" in Proc. INTERSPEECH, 2024, pp. 4278-4282.", + "[7] M. Li, Y. Ahmadiadli, and X.-P. Zhang, \"A survey on speech deepfake detection,\" ACM Comput. Surv., vol. 57, no. 7, 2025.", + "[8] N. M. Müller, P. Kawa, W. H. Choong, E. Casanova, E. Gölle, T. Müller, P. Syga, P. Sperl, and K. Böttinger, \"MLAAD: The multi-language audio anti-spoofing dataset,\" in Proc. Int. Jt. Conf. Neural Netw. (IJCNN), 2024, pp. 1-7.", + "[9] Y. Xie, Y. Lu, R. Fu, Z. Wen, Z. Wang, J. Tao, X. Qi, X. Wang, Y. Liu, H. Cheng, L. Ye, and Y. Sun, \"The codecfake dataset and countermeasures for the universally detection of deepfake audio,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 33, pp. 386-400, 2025.", + "[10] R. K. Das, X. Tian, T. Kinnunen, and H. Li, “The attacker's perspective on automatic speaker verification: An overview,” in Proc. INTERSPEECH, 2020, pp. 4213–4217.", + "[11] J.-w. Jung, Y. Wu, X. Wang, J.-H. Kim, S. Maiti, Y. Matsunaga, H.-j. Shim, J. Tian, N. Evans, J. S. Chung, W. Zhang, S. Um, S. Takamichi, and S. Watanabe, \"SpoofCeleb: Speech deepfake detection and SASV in the wild,\" IEEE Open J. Signal Process., vol. 6, pp. 68-77, 2025.", + "[12] J. Du, X. Chen, H. Wu, L. Zhang, I. Lin, I. Chiu, W. Ren, Y. Tseng, Y. Tsao, J.-S. R. Jang et al., \"CodecFake-Omni: A large-scale codec-based deepfake speech dataset,\" arXiv preprint arXiv:2501.08238, 2025.", + "[13] X. Chen, H. Wu, R. Jang, and H. yi Lee, \"Singing voice graph modeling for singfake detection,\" in Proc. INTERSPEECH, 2024, pp. 4843-4847.", + "[14] Y. Zhu, S. Koppisetti, T. Tran, and G. Bharaj, \"SLIM: Style-linguistics mismatch model for generalized audio deepfake detection,\" in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 37, 2024, pp. 67901-67928.", + "[15] H. Tak, M. Todisco, X. Wang, J. weon Jung, J. Yamagishi, and N. Evans, \"Automatic speaker verification spoofing and deepfake detection using wav2vec 2.0 and data augmentation,\" in Proc. Odyssey Speaker Lang. Recognit. Workshop, 2022, pp. 112-119." + ], + "bbox": [ + 76, + 351, + 491, + 950 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] A. Guragain, T. Liu, Z. Pan, H. B. Sailor, and Q. Wang, \"Speech foundation model ensembles for the controlled singing voice deepfake detection (CtrSVDD) challenge 2024,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024.", + "[17] Y. Xiao and R. K. 
Das, \"XLSR-Mamba: A dual-column bidirectional state space model for spoofing attack detection,\" IEEE Signal Process Lett., vol. 32, pp. 1276-1280, 2025.", + "[18] S.-H. Gao, M.-M. Cheng, K. Zhao, X.-Y. Zhang, M.-H. Yang, and P. Torr, “Res2Net: A new multi-scale backbone architecture,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 43, no. 2, pp. 652-662, 2021.", + "[19] K. He, X. Zhang, S. Ren, and J. Sun, \"Deep residual learning for image recognition,\" in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2016, pp. 770-778.", + "[20] Y. Chen, S. Zheng, H. Wang, L. Cheng, Q. Chen, and J. Qi, \"An enhanced Res2Net with local and global feature fusion for speaker verification,\" in Proc. INTERSPEECH, 2023, pp. 2228-2232.", + "[21] Y. Chen, S. Zheng, H. Wang, L. Cheng, Q. Chen, S. Zhang, and J. Li, \"ERes2NetV2: Boosting short-duration speaker verification performance with computational efficiency,\" in Proc. INTERSPEECH, 2024, pp. 3245-3249.", + "[22] T. Liu, K. A. Lee, Q. Wang, and H. Li, \"Golden Gemini is all you need: Finding the sweet spots for speaker verification,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 2324-2337, 2024.", + "[23] X. Li, X. Wu, H. Lu, X. Liu, and H. Meng, \"Channel-wise gated Res2Net: Towards robust detection of synthetic speech attacks,\" in Proc. INTERSPEECH, 2021, pp. 4314-4318.", + "[24] J. Kim and S. M. Ban, \"Phase-aware spoof speech detection based on Res2Net with phase network,\" in Proc. ICASSP, 2023, pp. 1-5.", + "[25] T. Liu, I. Kukanov, Z. Pan, Q. Wang, H. B. Sailor, and K. A. Lee, \"Towards quantifying and reducing language mismatch effects in cross-lingual speech anti-spoofing,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 1185-1192.", + "[26] J.-w. Jung, H.-S. Heo, H. Tak, H.-j. Shim, J. S. Chung, B.-J. Lee, H.-J. Yu, and N. Evans, \"AASIST: Audio anti-spoofing using integrated spectro-temporal graph attention networks,\" in Proc. ICASSP, 2022, pp. 6367-6371.", + "[27] Y. Chen, J. Yi, J. Xue, C. Wang, X. Zhang, S. Dong, S. Zeng, J. Tao, Z. Lv, and C. Fan, \"RawBMamba: End-to-end bidirectional state space model for audio deepfake detection,\" in Proc. INTERSPEECH, 2024, pp. 2720-2724.", + "[28] Y. Chen, H. Wu, N. Jiang, X. Xia, Q. Gu, Y. Hao, P. Cai, Y. Guan, J. Wang, W. Xie et al., \"Ustc-kxdigit system description for asvsproof5 challenge,\" arXiv preprint arXiv:2409.01695, 2024.", + "[29] Z. Wei, D. Ye, J. Deng, and Y. Lin, “From voices to beats: Enhancing music deepfake detection by identifying forgeries in background,” in Proc. ICASSP, 2025, pp. 1-5.", + "[30] Y. Guan, Y. Ai, Z. Li, S. Peng, and W. Guo, \"Recursive feature learning from pre-trained models for spoofing speech detection,\" in Proc. ICASSP, 2025, pp. 1-5.", + "[31] Z. Pan, T. Liu, H. B. Sailor, and Q. Wang, \"Attentive merging of hidden embeddings from pre-trained speech model for anti-spoofing detection,\" in Proc. INTERSPEECH, 2024, pp. 2090-2094.", + "[32] M. Huaifah, T. Liu, H. B. Sailor, K. M. Tan, T. K. Vangani, Q. Wang, J. H. Wong, N. F. Chen, and A. T. Aw, \"Towards a speech foundation model for Singapore and beyond,\" arXiv preprint arXiv:2412.11538, 2024.", + "[33] D.-T. Truong, R. Tao, T. Nguyen, H.-T. Luong, K. A. Lee, and E. S. Chng, “Temporal-channel modeling in multi-head self-attention for synthetic speech detection,” in Proc. INTERSPEECH, 2024, pp. 537–541.", + "[34] B. Desplanques, J. Thienpondt, and K. 
Demuynck, \"ECAPA-TDNN: Emphasized channel attention, propagation and aggregation in TDNN based speaker verification,\" in Proc. INTERSPEECH, 2020, pp. 3830-3834.", + "[35] Q. Zhang, S. Wen, and T. Hu, \"Audio deepfake detection with self-supervised XLS-R and SLS classifier,\" in Proc. ACM Int. Conf. Multimedia, 2024, pp. 6765-6773.", + "[36] Z. Ge, X. Xu, H. Guo, Z. Yang, and B. Schuller, \"Gncl: A graph neural network with consistency loss for segment-level spoofed speech detection,\" in Proc. ICASSP, 2025, pp. 1-5.", + "[37] X. Wang, H. Delgado, H. Tak, J. weon Jung, H. jin Shim, M. Todisco, I. Kukanov, X. Liu, M. Sahidullah, T. H. Kinnunen, N. Evans, K. A. Lee, and J. Yamagishi, \"ASVspoof 5: crowdsourced speech data, deepfakes, and adversarial attacks at scale,\" in Autom. Speaker Verif. Spoofing Countermeas. Workshop, 2024, pp. 1-8." + ], + "bbox": [ + 506, + 70, + 921, + 922 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[38] Y. Zhang, Y. Zang, J. Shi, R. Yamamoto, T. Toda, and Z. Duan, \"SVDD 2024: The inaugural singing voice deepfake detection challenge,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 782-787.", + "[39] Q. Zhang, S. Wen, F. Yan, T. Hu, and J. Li, \"XWSB: A blend system utilizing XLS-R and WavLM with SLS classifier detection system for SVDD 2024 challenge,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 788-794.", + "[40] J. Yi, J. Tao, R. Fu, X. Yan, C. Wang, T. Wang, C. Y. Zhang, X. Zhang, Y. Zhao, Y. Ren et al., \"ADD 2023: the second audio deepfake detection challenge,\" arXiv preprint arXiv:2305.13774, 2023.", + "[41] J. Hu, L. Shen, and G. Sun, \"Squeeze-and-excitation networks,\" in Proc. IEEE Conf. Comput. Vis. Pattern Recog. (CVPR), 2018.", + "[42] K. Okabe, T. Koshinaka, and K. Shinoda, \"Attentive statistics pooling for deep speaker embedding,\" in Proc. INTERSPEECH, 2018, pp. 2252-2256.", + "[43] T. Zhou, Y. Zhao, and J. Wu, \"ResNeXt and Res2Net structures for speaker verification,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2021, pp. 301-307.", + "[44] Q. Wang, B. Wu, P. Zhu, P. Li, W. Zuo, and Q. Hu, \"ECA-Net: Efficient channel attention for deep convolutional neural networks,\" in Proc. IEEE Conf. Comput. Vis. Pattern Recog. (CVPR), 2020, pp. 11531-11539.", + "[45] T. Liu, R. K. Das, K. A. Lee, and H. Li, \"MFA: TDNN with multi-scale frequency-channel attention for text-independent speaker verification with short utterances,\" in Proc. ICASSP, 2022, pp. 7517-7521.", + "[46] Y. Zang, J. Shi, Y. Zhang, R. Yamamoto, J. Han, Y. Tang, S. Xu, W. Zhao, J. Guo, T. Toda, and Z. Duan, \"CtrSVDD: A benchmark dataset and baseline analysis for controlled singing voice deepfake detection,\" in Proc. INTERSPEECH, 2024, pp. 4783-4787.", + "[47] X. Wang, J. Yamagishi, M. Todisco, H. Delgado, A. Nautsch, N. Evans, M. Sahidullah, V. Vestman, T. Kinnunen, K. A. Lee, L. Juvela, P. Alku, Y.-H. Peng, H.-T. Hwang, Y. Tsao, H.-M. Wang, S. L. Maguer, M. Becker, F. Henderson, R. Clark, Y. Zhang, Q. Wang, Y. Jia, K. Onuma, K. Mushika, T. Kaneda, Y. Jiang, L.-J. Liu, Y.-C. Wu, W.-C. Huang, T. Toda, K. Tanaka, H. Kameoka, I. Steiner, D. Matrouf, J.-F. Bonastre, A. Govender, S. Ronanki, J.-X. Zhang, and Z.-H. 
Ling, \"ASVspoof 2019: A large-scale public database of synthesized, converted and replayed speech,\" Comput. Speech Lang., vol. 64, p. 101114, 2020.", + "[48] J. Yamagishi, X. Wang, M. Todisco, M. Sahidullah, J. Patino, A. Nautsch, X. Liu, K. A. Lee, T. Kinnunen, N. Evans, and H. Delgado, \"ASVspoof 2021: accelerating progress in spoofed and deepfake speech detection,\" in Autom. Speaker Verif. Spoofing Countermeas. Challenge, 2021, pp. 47-54.", + "[49] X. Wang, H. Delgado, H. Tak, J.-w. Jung, H.-j. Shim, M. Todisco, I. Kukanov, X. Liu, M. Sahidullah, T. Kinnunen et al., \"ASVspoof 5: Design, collection and validation of resources for spoofing, deepfake, and adversarial attack detection using crowdsourced speech,\" arXiv preprint arXiv:2502.08857, 2025.", + "[50] N. M. Müller, P. Czempin, F. Dieckmann, A. Froghyar, and K. Böttinger, \"Does audio deepfake detection generalize?\" in Proc. INTERSPEECH, 2022, pp. 2783-2787.", + "[51] L. Zhang, X. Wang, E. Cooper, N. Evans, and J. Yamagishi, \"The PartialProof database and countermeasures for the detection of short fake speech segments embedded in an utterance,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 31, pp. 813-825, 2023.", + "[52] Y. Zang, Y. Zhang, M. Heydari, and Z. Duan, \"SingFake: Singing voice deepfake detection,\" in Proc. ICASSP, 2024, pp. 12156-12160.", + "[53] Y. Xie, J. Zhou, X. Lu, Z. Jiang, Y. Yang, H. Cheng, and L. Ye, \"FSD: An initial chinese dataset for fake song detection,\" in Proc. ICASSP, 2024, pp. 4605-4609.", + "[54] T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar, “Focal loss for dense object detection,” in IEEE Int. Conf. Comput. Vis. (ICCV), 2017, pp. 2980–2988.", + "[55] H. Tak, M. Kamble, J. Patino, M. Todisco, and N. Evans, \"Rawboost: A raw data boosting and augmentation method applied to automatic speaker verification anti-spoofing,\" in Proc. ICASSP, 2022, pp. 6382-6386." + ], + "bbox": [ + 76, + 70, + 491, + 829 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[56] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” in Int. Conf. Learn. Represent., 2015.", + "[57] D. Snyder, G. Chen, and D. Povey, “Musan: A music, speech, and noise corpus,” arXiv preprint arXiv:1510.08484, 2015.", + "[58] T. Ko, V. Peddinti, D. Povey, M. L. Seltzer, and S. Khudanpur, “A study on data augmentation of reverberant speech for robust speech recognition,” in 2017 IEEE international conference on acoustics, speech and signal processing (ICASSP). IEEE, 2017, pp. 5220–5224.", + "[59] T. Liu, L. Zhang, R. K. Das, Y. Ma, R. Tao, and H. Li, \"How do neural spoofing countermeasures detect partially spoofed audio?\" in Proc. INTERSPEECH, 2024, pp. 1105-1109.", + "[60] X. Wang and J. Yamagishi, “A comparative study on recent neural spoofing countermeasures for synthetic speech detection,” in Proc. INTERSPEECH, 2021, pp. 4259–4263.", + "[61] J. M. Martin-Doñas and A. Álvarez, “The Vicomtech audio deepfake detection system based on wav2vec2 for the 2022 ADD challenge,” in Proc. ICASSP, 2022, pp. 9241–9245.", + "[62] X. Wang and J. Yamagishi, “Investigating self-supervised front ends for speech spoofing countermeasures,” in Proc. Odyssey Speaker Lang. Recognit. Workshop, 2022, pp. 100–106.", + "[63] E. Rosello, A. Gomez-Alanis, A. M. Gomez, and A. Peinado, “A conformer-based classifier for variable-length utterance processing in anti-spoofing,” in Proc. INTERSPEECH, 2023, pp. 5281-5285.", + "[64] E. Rosello, A. M. Gomez, I. López-Espejo, A. M. Peinado, and J. 
M. Martín-Doñas, “Anti-spoofing ensembling model: Dynamic weight allocation in ensemble models for improved voice biometrics security,” in Proc. INTERSPEECH, 2024, pp. 497–501.", + "[65] H. M. Tran, D. Guennec, P. Martin, A. Sini, D. Loline, A. Delhay, and P.-F. Marteau, \"Spoofed speech detection with a focus on speaker embedding,\" in Proc. INTERSPEECH, 2024, pp. 2080-2084.", + "[66] B. Wang, Y. Tang, F. Wei, Z. Ba, and K. Ren, \"FTDKD: Frequency-time domain knowledge distillation for low-quality compressed audio deepfake detection,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 4905-4918, 2024.", + "[67] Y. Zhang, J. Lu, Z. Shang, W. Wang, and P. Zhang, “Improving short utterance anti-spoofing with AASIST2,” in Proc. ICASSP, 2024, pp. 11636-11640.", + "[68] Y. Guo, H. Huang, X. Chen, H. Zhao, and Y. Wang, \"Audio deepfake detection with self-supervised WavLm and multi-fusion attentive classifier,\" in Proc. ICASSP, 2024, pp. 12702-12706.", + "[69] Z. Wang, R. Fu, Z. Wen, J. Tao, X. Wang, Y. Xie, X. Qi, S. Shi, Y. Lu, Y. Liu et al., \"Mixture of experts fusion for fake audio detection using frozen wav2vec 2.0,\" arXiv preprint arXiv:2409.11909, 2024.", + "[70] J. Lu, Y. Zhang, W. Wang, Z. Shang, and P. Zhang, “One-class knowledge distillation for spoofing speech detection,” in Proc. ICASSP, 2024, pp. 11251-11255.", + "[71] W. Huang, Y. Gu, Z. Wang, H. Zhu, and Y. Qian, \"Generalizable audio deepfake detection via latent space refinement and augmentation,\" in Proc. ICASSP, 2025, pp. 1-5.", + "[72] Z. Jin, L. Lang, and B. Leng, \"Wave-spectrogram cross-modal aggregation for audio deepfake detection,\" in Proc. ICASSP, 2025, pp. 1-5.", + "[73] C. Y. Kwok, D.-T. Truong, and J. Q. Yip, \"Robust audio deepfake detection using ensemble confidence calibration,\" in Proc. ICASSP, 2025, pp. 1-5.", + "[74] O. Pascu, A. Stan, D. Oneata, E. Oneata, and H. Cucu, \"Towards generalisable and calibrated audio deepfake detection with self-supervised representations,\" in Proc. INTERSPEECH, 2024, pp. 4828-4832.", + "[75] H.-T. Luong, H. Li, L. Zhang, K. A. Lee, and E. S. Chng, “LlamaPartial-Spoof: An LLM-driven fake speech dataset simulating disinformation generation,” arXiv preprint arXiv:2409.14743, 2024.", + "[76] T. Liu, K. A. Lee, Q. Wang, and H. Li, \"Disentangling voice and content with self-supervision for speaker recognition,\" Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 36, pp. 50221-50236, 2023.", + "[77] S. Wang, Z. Chen, K. A. Lee, Y. Qian, and H. Li, “Overview of speaker modeling and its applications: From the lens of deep speaker representation learning,” IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 4971–4998, 2024." 
+ ], + "bbox": [ + 506, + 70, + 919, + 828 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY", + "bbox": [ + 76, + 29, + 452, + 41 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 13 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_model.json b/data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_model.json new file mode 100644 index 0000000000000000000000000000000000000000..aa2f7be8ca138265ddbab6802f249af341536d4e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_model.json @@ -0,0 +1,3429 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.078, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "1" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.07, + 0.316, + 0.084 + ], + "angle": 0, + "content": "The current version is 'Preprint'." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.086, + 0.49, + 0.129 + ], + "angle": 0, + "content": "This work has been submitted to the IEEE for possible publication. Copyright may be transferred without notice, after which this version may no longer be accessible." + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.131, + 0.472, + 0.145 + ], + "angle": 0, + "content": "This information aligns with the guidelines available at:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.147, + 0.484, + 0.19 + ], + "angle": 0, + "content": "https://journals.ieeethorcenter.ieee.org/become-an-iiie-journal-author/publishing-ethics/guidelines-and-policies/post-publication-policies/" + }, + { + "type": "aside_text", + "bbox": [ + 0.026, + 0.269, + 0.058, + 0.729 + ], + "angle": 270, + "content": "arXiv:2504.05657v2 [eess.AS] 26 Oct 2025" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.911, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "2" + }, + { + "type": "title", + "bbox": [ + 0.107, + 0.071, + 0.895, + 0.141 + ], + "angle": 0, + "content": "Nes2Net: A Lightweight Nested Architecture for Foundation Model Driven Speech Anti-spoofing" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.148, + 0.885, + 0.183 + ], + "angle": 0, + "content": "Tianchi Liu, Student Member, Duc-Tuan Truong, Student Member, Rohan Kumar Das, Senior Member, Kong Aik Lee, Senior Member, Haizhou Li, Fellow" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.236, + 0.493, + 0.526 + ], + "angle": 0, + "content": "Abstract—Speech foundation models have significantly advanced various speech-related tasks by providing exceptional representation capabilities. However, their high-dimensional output features often create a mismatch with downstream task models, which typically require lower-dimensional inputs. A common solution is to apply a dimensionality reduction (DR) layer, but this approach increases parameter overhead, computational costs, and risks losing valuable information. To address these issues, we propose Nested Res2Net (Nes2Net), a lightweight back-end architecture designed to directly process high-dimensional features without DR layers. 
The nested structure enhances multi-scale feature extraction, improves feature interaction, and preserves high-dimensional information. We first validate Nes2Net on CtrSVDD, a singing voice deepfake detection dataset, and report a \\(22\\%\\) performance improvement and an \\(87\\%\\) back-end computational cost reduction over the state-of-the-art baseline. Additionally, extensive testing across four diverse datasets: ASVspoof 2021, ASVspoof 5, PartialSpoof, and In-the-Wild, covering fully spoofed speech, adversarial attacks, partial spoofing, and real-world scenarios, consistently highlights Nes2Net's superior robustness and generalization capabilities. The code package and pre-trained models are available at https://github.com/Liu-Tianchi/Nes2Net." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.533, + 0.49, + 0.561 + ], + "angle": 0, + "content": "Index Terms—DeepFake detection, speech anti-spoofing, Res2Net, Nes2Net, SSL, speech foundation model" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.584, + 0.352, + 0.598 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.605, + 0.493, + 0.697 + ], + "angle": 0, + "content": "SPEECH foundation models, such as wav2vec 2.0 [1], HuBERT [2], and WavLM [3], have revolutionized speech processing by leveraging large-scale pretraining to capture complex acoustic and linguistic patterns [4]. This has driven notable advances in automatic speech recognition (ASR) [5], speaker verification (SV) [6], and other speech applications." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.697, + 0.493, + 0.773 + ], + "angle": 0, + "content": "Beyond traditional tasks, speech foundation models also show great promise in addressing critical security concerns, particularly speech anti-spoofing (also referred to as deepfake detection) [7]. With the growing sophistication of spoofing techniques, such as voice conversion, ensuring the reliability" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.785, + 0.493, + 0.819 + ], + "angle": 0, + "content": "Tianchi Liu and Haizhou Li are with the Department of Electrical and Computer Engineering, National University of Singapore, Singapore. Tianchi Liu is also with LIGHTSPEED, Singapore (email: tianchi.liu@u.nus.edu);" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.819, + 0.493, + 0.842 + ], + "angle": 0, + "content": "Duc-Tuan Truong is with the Nanyang Technological University, Singapore (email: truongdu001@e.ntu.edu.sg);" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.842, + 0.493, + 0.865 + ], + "angle": 0, + "content": "Rohan Kumar Das is with the Fortemedia Singapore, Singapore (email: ecerohan@gmail.com);" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.864, + 0.493, + 0.91 + ], + "angle": 0, + "content": "Kong Aik Lee is with the Department of Electrical and Electronic Engineering and the Research Centre for Data Science & Artificial Intelligence, The Hong Kong Polytechnic University, Hong Kong (e-mail: kongaik.lee@polyu.edu.hk);" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.91, + 0.493, + 0.946 + ], + "angle": 0, + "content": "Haizhou Li is also with the Shenzhen Research Institute of Big Data, School of Artificial Intelligence, School of Data Science, The Chinese University of Hong Kong, Shenzhen, China (email: haizhouli@cuhk.edu.cn)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.236, + 0.921, + 0.296 + ], + "angle": 0, + "content": "and security of speech-driven systems has become a pressing concern [8]–[12]. 
Leveraging the rich representations of these foundation models could significantly improve the robustness and generalization of anti-spoofing systems [13]–[15]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.296, + 0.922, + 0.582 + ], + "angle": 0, + "content": "While speech foundation models offer exceptional representations, their high-dimensional feature outputs present significant challenges for downstream tasks. Downstream models used in tasks like speech anti-spoofing typically require lower-dimensional features [15]–[17]. To address this mismatch, a common approach is to introduce a dimensionality reduction (DR) layer, usually implemented as a fully connected (FC) layer for transforming high-dimensional features into lower-dimensional features. However, this conventional strategy presents notable drawbacks. Given that downstream classifiers are typically compact [15], [16], the DR layer alone often consumes a substantial portion of the parameters and computational resources within the entire back-end model. Moreover, directly projecting high-dimensional features in a one-shot manner through an FC layer leads to the loss of important information, reducing the effectiveness of speech foundation models. These issues highlight the need for a more efficient and effective solution to bridge the dimensionality gap and fully utilize speech foundation models in downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.582, + 0.922, + 0.734 + ], + "angle": 0, + "content": "To address these challenges, we propose Nested Res2Net (Nes2Net) to process high-dimensional features from speech foundation models, eliminating the need for a DR layer while preserving the richness of the original representations. By addressing key limitations of DR layers, such as excessive computational cost and information loss, Nes2Net offers a more efficient and effective solution. This design makes it particularly suitable for tasks requiring a balance of high performance and efficiency, such as speech anti-spoofing. The key contributions of this work can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.734, + 0.92, + 0.794 + ], + "angle": 0, + "content": "- Novel Architecture: We introduce Nes2Net, a new approach that effectively addresses the limitations of DR layers. Nes2Net retains the expressive power of high-dimensional features while reducing model complexity." + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.795, + 0.922, + 0.9 + ], + "angle": 0, + "content": "- Enhanced Performance, Efficiency, and Generalization: Our method demonstrates a \\(22\\%\\) performance gain and an \\(87\\%\\) reduction in computational costs compared to the state-of-the-art baselines on the CtrlSVDD dataset. Further experiments conducted on four additional datasets across various scenarios demonstrate strong generalization capability and consistently superior performance." + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.9, + 0.922, + 0.946 + ], + "angle": 0, + "content": "- Reproducibility: To facilitate further research and application, we make our scripts and pre-trained models publicly available." 
+ }, + { + "type": "list", + "bbox": [ + 0.52, + 0.734, + 0.922, + 0.946 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "3" + }, + { + "type": "title", + "bbox": [ + 0.211, + 0.07, + 0.357, + 0.083 + ], + "angle": 0, + "content": "II. RELATED WORK" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.089, + 0.158, + 0.102 + ], + "angle": 0, + "content": "A. Res2Net" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.108, + 0.493, + 0.304 + ], + "angle": 0, + "content": "Res2Net [18] is a well-known architecture designed to extract multi-scale features. Unlike ResNet [19], Res2Net uses hierarchical residual connections within a single block, allowing it to capture patterns across varying receptive fields simultaneously [18]. This design offers proven advantages in speech-related tasks, such as SV [20]–[22] and anti-spoofing [23]–[25], where capturing subtle variations and complex acoustic patterns is important. As shown in Fig. 1, Res2Net (highlighted using a light red block) can also serve as a classifier within a speech foundation model-based anti-spoofing system. Its ability to extract multi-scale features has led to superior performance over conventional models and motivates the design of Nested Res2Net in this work." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.323, + 0.49, + 0.337 + ], + "angle": 0, + "content": "B. Hand-crafted Feature-based Speech Anti-Spoofing Models" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.341, + 0.493, + 0.538 + ], + "angle": 0, + "content": "Hand-crafted acoustic features (such as MFCC) are common choices for many earlier speech anti-spoofing systems. These systems have evolved to effectively detect speech deepfakes [26], [27]. For instance, the Channel-wise Gated Res2Net (CG-Res2Net) [23] introduces a gating mechanism within the Res2Net architecture, enabling dynamic selection of channel-wise features to enhance generalization to unseen attacks. A widely recognized model is AASIST [26], which employs spectro-temporal graph attention layers to capture both temporal and spectral artifacts, thereby achieving efficient and accurate detection. Given AASIST's SOTA performance and its wide adoption in recent anti-spoofing challenges [16], [28], we consider it as our main baseline for evaluation." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.557, + 0.285, + 0.571 + ], + "angle": 0, + "content": "C. Speech Foundation Models" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.575, + 0.492, + 0.742 + ], + "angle": 0, + "content": "Speech foundation models are often referred to as Self-Supervised Learning (SSL) models due to their typical pretraining on large amounts of unlabeled speech data using self-supervised learning techniques. Examples include wav2vec 2.0 [1], HuBERT [2], and WavLM [3]. Unlike hand-crafted acoustic features, which are limited in their ability to adapt to diverse and complex conditions, self-supervised learning (SSL) models learn rich and generalized speech representations that can be effectively adapted to various downstream applications. This allows them to achieve superior performance in speech-related tasks, including speech anti-spoofing." 
+ }, + { + "type": "title", + "bbox": [ + 0.075, + 0.76, + 0.416, + 0.775 + ], + "angle": 0, + "content": "D. Speech Foundation Model-based Anti-spoofing" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.779, + 0.491, + 0.914 + ], + "angle": 0, + "content": "As discussed in the previous subsection, speech foundation models can capture more informative representations than handcrafted or raw acoustic features [3]. This makes them highly effective for speech anti-spoofing, as they generalize well across datasets and are more robust to unseen attacks [15]. As a result, many recent anti-spoofing systems increasingly adopt these models as front-ends, feeding their features to the back-end classifiers and consistently outperforming traditional models [16], [29], [30]." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.492, + 0.947 + ], + "angle": 0, + "content": "To connect these powerful front-end models to downstream classifiers, a feature aggregation layer is introduced, as shown" + }, + { + "type": "table_caption", + "bbox": [ + 0.517, + 0.072, + 0.91, + 0.119 + ], + "angle": 0, + "content": "TABLEI CONTRIBUTION OF THE DR LAYER ON THE NUMBER OF PARAMETERS AND COMPUTATIONAL COST IN BACK-END MODELS. MMACS STANDS FOR MILLION MULTIPLY-ACCUMULATE OPERATIONS." + }, + { + "type": "table", + "bbox": [ + 0.513, + 0.127, + 0.912, + 0.213 + ], + "angle": 0, + "content": "
| Back-end Model | Params (DR) | Params (Total) | Params (DR %) | MMACs (DR) | MMACs (Total) | MMACs (DR %) |
|---|---|---|---|---|---|---|
| ResNet [19] | 131k | 611k | 21% | 26.24 | 70.62 | 37% |
| Res2Net [18] | 131k | 452k | 29% | 26.24 | 64.93 | 40% |
| ECAPA [34] | 131k | 497k | 26% | 26.24 | 80.21 | 33% |
| AASIST [26] | 131k | 447k | 29% | 26.24 | 707.65 | 4% |
" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.229, + 0.921, + 0.29 + ], + "angle": 0, + "content": "in Fig. 1. This layer combines features from different SSL layers using methods such as a simple weighted sum or attention-based methods like Squeeze-and-Excitation Aggregation (SEA) [16] and Attentive Merging (AttM) [31]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.29, + 0.922, + 0.47 + ], + "angle": 0, + "content": "Following the aggregation layer, the resulting features are passed to the back-end classifier, as shown in the green box of Fig. 1. Existing methods typically use a DR layer, which reduces the high-dimensional features of \\( N \\) channels (commonly \\( N = 1024 \\) [1], [3], [32]) to a lower dimension \\( D \\) (e.g., \\( D = 128 \\) [15], [16] or \\( D = 144 \\) [17], [33]) to match the classifier's input requirements. The classifier model then extracts features from the DR layer outputs and produces the final score. As illustrated in the red box of Fig. 1, commonly used classifier structures include traditional models such as ResNet [19], Res2Net [18], ECAPA-TDNN [34], and AASIST [26]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.47, + 0.922, + 0.636 + ], + "angle": 0, + "content": "The strong performance of these systems stems from their ability to capture rich speech representations, enabling more accurate distinction between real and spoofed speech. As a result, these systems have achieved SOTA results [33], [35], [36], especially in recent challenges like ASVspoof 5 [28], [37], CtrSVDD [16], [38], [39], and ADD [40]. However, the use of a DR layer introduces challenges that limit the backend's ability to fully leverage the rich representations from speech foundation models. In this work, we aim to better unlock the potential of foundation models for speech antispoofing. These issues will be discussed in the next subsection." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.655, + 0.842, + 0.67 + ], + "angle": 0, + "content": "E. Limitation of Dimensionality Reduction Layer" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.674, + 0.921, + 0.794 + ], + "angle": 0, + "content": "Existing speech foundation model-based anti-spoofing systems excel in extracting rich, high-dimensional feature representations, which capture intricate patterns in speech. However, this high dimensionality poses a significant challenge for downstream tasks. Models in these tasks typically require lower-dimensional features [23], [26], [27], creating a mismatch between the output features of the foundation models and the requirements of downstream processing." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.795, + 0.922, + 0.915 + ], + "angle": 0, + "content": "A commonly used approach for dimensionality reduction is to employ a DR layer. However, this approach has several issues, including parameter overhead and potential information loss. As shown in Table I, our analysis of back-end models further emphasizes the inefficiency of this approach. We consider commonly used feature dimensions of \\( N = 1024 \\) from large models [1], [3], and a reduced dimension of \\( D = 128 \\), widely adopted in SOTA back-end models [15], [16], [31]." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.915, + 0.922, + 0.947 + ], + "angle": 0, + "content": "Across various back-end models, the DR layer, despite being just a single layer, consistently accounts for a substantial" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.078, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "4" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.071, + 0.919, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.337, + 0.925, + 0.397 + ], + "angle": 0, + "content": "Fig. 1. The block diagram of the speech foundation model-based speech anti-spoofing system, showcasing both the traditional back-end models and the proposed Nes2Net back-end. The traditional back-end models include a DR layer and a classifier, such as ResNet [19], Res2Net [18], ECAPA-TDNN [34], and AASIST [26]. In contrast, the proposed Nes2Net back-end model features a DR layer-free design. Additionally, an enhanced version of its nested layer, named Nes2Net-X, is introduced to further improve performance. Abbreviations used in the figure include: 'FC' (fully connected layer), 'Conv' (convolutional layer), 'WS' (weighted sum), 'SE' (squeeze-and-excitation module) [41], and 'Att. Stat. Pool.' (attentive statistics pooling) [42]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.412, + 0.492, + 0.546 + ], + "angle": 0, + "content": "share of parameters and computational cost, underscoring its resource-intensive nature. For instance, the DR layer accounts for \\(21\\%\\) to \\(29\\%\\) of the parameters across ResNet, Res2Net, ECAPA, and AASIST. In terms of computational cost, the DR layer generally contributes at least one-third of the total MACs. AASIST is the only exception, where the DR layer accounts for just \\(4\\%\\) of the MACs, primarily because its overall MAC count is an order of magnitude higher than that of other models." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.548, + 0.493, + 0.624 + ], + "angle": 0, + "content": "This table highlights that a single DR layer significantly inflates the back-end model's size and resource demands. Furthermore, its direct projection design discards important high-dimensional features, limiting the overall potential of speech foundation models." + }, + { + "type": "title", + "bbox": [ + 0.209, + 0.645, + 0.36, + 0.658 + ], + "angle": 0, + "content": "III. METHODOLOGY" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.668, + 0.348, + 0.683 + ], + "angle": 0, + "content": "A. Proposed Nested Res2Net (Nes2Net)" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.687, + 0.491, + 0.793 + ], + "angle": 0, + "content": "The design of Nes2Net is driven by two primary objectives: 1) effectively and efficiently utilizing the high-dimensional features from speech foundation models, and 2) enhancing multi-scale feature extraction to achieve robust generalization in speech anti-spoofing tasks. These objectives are realized through a novel nested architecture that simultaneously improves the efficiency, flexibility, and robustness of the model." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.794, + 0.492, + 0.946 + ], + "angle": 0, + "content": "Efficiency and Retention of Rich Feature Information: The analysis in Section II-E reveals the limitations of employing the DR layer. 
Building upon the observations, Nes2Net entirely removes the DR layer, directly processing high-dimensional features to retain their intrinsic richness and minimize unnecessary computational costs. By bypassing the DR layer, Nes2Net prevents the information bottleneck typically caused by early dimensionality reduction. This ensures the preservation of detailed representations essential for accurately distinguishing genuine speech from spoofed audio." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.412, + 0.923, + 0.593 + ], + "angle": 0, + "content": "Enhanced Multi-Scale Feature Interaction and Expressiveness: While the Res2Net architecture effectively extracts multi-scale features through hierarchical splits, it exhibits significant limitations when processing high-dimensional features directly, especially with large split scales \\( s \\). Specifically, Res2Net suffers from feature dilution [18], redundant transformations [43], and restricted interactions among channels. Excessive splitting fragments the features, weakening their expressiveness, and repetitive transformations increase computational redundancy, potentially causing overfitting. Moreover, closely related information can be distributed across non-adjacent subsets, limiting effective cross-channel interactions." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.596, + 0.923, + 0.852 + ], + "angle": 0, + "content": "To overcome these limitations, as illustrated in Fig. 1, we propose a novel Nested Res2Net (Nes2Net) architecture that introduces a hierarchical nesting structure. This additional degree of flexibility significantly enhances the model's representational capability. Each nested layer progressively refines features by building upon outputs from preceding layers and also incorporates efficient local cross-channel attention mechanisms [44], [45], strengthening interactions across channels. This holistic feature extraction approach enables Nes2Net to comprehensively capture intricate speech patterns. Moreover, the cumulative refinement effectively mitigates the issue of feature dilution, preserving rich and expressive multi-scale information. Benefiting from the structural advantages of the nesting strategy, the need for excessive fine-grained splits is reduced, effectively mitigating redundant transformations. This approach also minimizes unnecessary computations, resulting in a compact yet highly expressive model." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.855, + 0.924, + 0.946 + ], + "angle": 0, + "content": "Critically, overfitting is a well-known challenge in speech anti-spoofing tasks, often leading to degraded performance in cross-domain scenarios. Previous studies [23], [26], particularly with compact models like AASIST and Res2Net (both with fewer than 500k parameters), have shown that smaller models can help reduce overfitting. Our experiments with these" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.492, + 0.175 + ], + "angle": 0, + "content": "models confirm that simply increasing their size does not always lead to better performance and can, in fact, make overfitting worse. 
As a result, improving feature quality through smarter model structure design becomes more important than just scaling up the model. The nested architecture of Nes2Net provides clear benefits as it maintains computational efficiency while reducing the risk of overfitting." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.175, + 0.492, + 0.205 + ], + "angle": 0, + "content": "The Nes2Net consists of an outer layer and several identical nested layers, described as follows:" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.206, + 0.493, + 0.342 + ], + "angle": 0, + "content": "1) Outer Layer: The outer layer of Nes2Net adopts a structure similar to that of Res2Net. The high-dimensional features produced by a speech foundation model are uniformly split into \\( s_1 \\) feature map subsets, denoted by \\( x_i \\), where \\( i \\in \\{1, 2, \\dots, s_1\\} \\). Each feature subset \\( x_i \\) has the same spatial size but contains only \\( \\frac{1}{s_1} \\) of the channels of the input feature map. With the exception of \\( x_1 \\), each \\( x_i \\) is paired with a corresponding nested layer, denoted by \\( \\mathbf{K}_i(\\cdot) \\). The output of \\( \\mathbf{K}_i(\\cdot) \\), represented as \\( y_i \\), is computed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.158, + 0.354, + 0.492, + 0.41 + ], + "angle": 0, + "content": "\\[\ny _ {i} = \\left\\{ \\begin{array}{l l} x _ {i} & i = 1; \\\\ \\mathbf {K} _ {i} \\left(x _ {i}\\right) & i = 2; \\\\ \\mathbf {K} _ {i} \\left(x _ {i} + y _ {i - 1}\\right) & 2 < i \\leq s _ {1}. \\end{array} \\right. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.417, + 0.492, + 0.553 + ], + "angle": 0, + "content": "where \\( x_{i} \\) is first added to the output of \\( \\mathbf{K}_{i - 1}(\\cdot) \\), and the resulting feature map is then fed into \\( \\mathbf{K}_i(\\cdot) \\) for further processing. All \\( y_{i} \\) features are concatenated along the channel dimension. Due to the combinatorial explosion effect [18], the output features encapsulate a fusion of receptive field characteristics across different scales and frame levels. These features are then pooled along the time axis to convert frame-level features into utterance-level representations, which are subsequently used to compute the final classification score." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.554, + 0.492, + 0.629 + ], + "angle": 0, + "content": "It is worth noting that since the outer layer directly processes high-dimensional features from the speech foundation model, the original two convolutional layers (kernel size of 1) used before splitting and after concatenation in Res2Net are removed to improve efficiency." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.63, + 0.493, + 0.69 + ], + "angle": 0, + "content": "2) Nested Layer: The nested layer acts as the core module responsible for processing the outer layer's intermediate features, denoted by \\( x_{i}^{\\prime} \\), where \\( i \\in \\{2, \\ldots, s_1\\} \\). Based on Eq. 1, \\( x_{i}^{\\prime} \\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.172, + 0.701, + 0.49, + 0.742 + ], + "angle": 0, + "content": "\\[\nx _ {i} ^ {\\prime} = \\left\\{ \\begin{array}{l l} x _ {i} & i = 2; \\\\ x _ {i} + y _ {i - 1} & 2 < i \\leq s _ {1}. \\end{array} \\right. 
\\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.748, + 0.493, + 0.839 + ], + "angle": 0, + "content": "Each nested layer \\(\\mathbf{K}_i(\\cdot)\\) is designed to extract multi-scale representations from its input while maintaining computational efficiency. As shown in Fig. 1, the structure of \\(\\mathbf{K}_i(\\cdot)\\) follows a SE-Res2Net-like design, but its input is the feature subset \\(x_i'\\) from the outer layer of Nes2Net. Specifically, each nested layer consists of the following components:" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.839, + 0.492, + 0.884 + ], + "angle": 0, + "content": "Convolutional Layers: The input feature map is first processed by a convolutional layer with a kernel size of 1 to extract local features while preserving the spatial dimensions." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.885, + 0.493, + 0.947 + ], + "angle": 0, + "content": "Multi-Scale Feature Extraction: To enable multi-scale processing, the input feature map \\( x_{i}^{\\prime} \\) is equally split into \\( s_2 \\) subsets along the channel dimension, denoted by \\( x_{i,j}^{\\prime} \\), where \\( j \\in \\{1, 2, \\ldots, s_2\\} \\). Each subset undergoes separate" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.07, + 0.921, + 0.1 + ], + "angle": 0, + "content": "transformations through convolutional operations \\(\\mathbf{M}_j\\) with varying receptive fields, yielding \\(y_{i,j}\\), formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.105, + 0.921, + 0.145 + ], + "angle": 0, + "content": "\\[\ny _ {i, j} = \\left\\{ \\begin{array}{l l} x _ {i, j} ^ {\\prime} & j = 1; \\\\ \\mathbf {M} _ {j} \\left(x _ {i, j} ^ {\\prime} + y _ {i, j - 1}\\right) & 1 < j \\leq s _ {2}. \\end{array} \\right. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.149, + 0.921, + 0.179 + ], + "angle": 0, + "content": "These transformed subsets are then concatenated to form the output \\( y_{i} \\) of the nested layer." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.179, + 0.921, + 0.254 + ], + "angle": 0, + "content": "SE Module: To further enhance the feature representations, a Squeeze-and-Excitation (SE) module is integrated into each nested layer. The SE module adaptively recalibrates channelwise features to emphasize informative features and suppress less relevant ones [41]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.254, + 0.921, + 0.314 + ], + "angle": 0, + "content": "Residual Connections: To enhance gradient flow and stabilize training, a residual connection is applied by adding the input of \\( x_{i}^{\\prime} \\) to its output \\( y_{i} \\). This design preserves the original information while incorporating newly learned features." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.314, + 0.921, + 0.358 + ], + "angle": 0, + "content": "In summary, the nested layer is lightweight, highly efficient, and designed to improve robustness and generalization across diverse conditions." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.374, + 0.796, + 0.388 + ], + "angle": 0, + "content": "B. Enhanced Nested Res2Net (Nes2Net-X)" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.392, + 0.922, + 0.573 + ], + "angle": 0, + "content": "Nes2Net efficiently addresses the high-dimensional feature issue. However, it relies on an additive combination method within the nested layer, which may limit the flexibility and effectiveness of feature extraction, as it implicitly assigns equal importance to all features. 
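For concreteness, the additive recursion of Eqs. (1)-(3) can be rendered as the minimal PyTorch sketch below. It assumes \( s_1 = s_2 = 8 \) (the setting used later in Section IV-B) and kernel sizes of 1 and 3, and it omits the SE modules, pooling, and scoring head; the released repository, not this sketch, is the authoritative implementation.

```python
# Minimal sketch of the Nes2Net recursion in Eqs. (1)-(3); SE modules,
# pooling, and the classifier head are omitted for brevity.
import torch
import torch.nn as nn

class NestedLayer(nn.Module):
    """K_i(.): split-transform-concatenate at scale s2, with a residual path."""
    def __init__(self, channels: int, s2: int = 8):
        super().__init__()
        self.s2 = s2
        width = channels // s2
        self.pre = nn.Conv1d(channels, channels, kernel_size=1)   # local 1x1 conv
        # M_j(.) for j = 2..s2; the first subset passes through unchanged (Eq. 3).
        self.convs = nn.ModuleList(
            nn.Conv1d(width, width, kernel_size=3, padding=1) for _ in range(s2 - 1)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:           # x: (B, C, T)
        subsets = torch.chunk(self.pre(x), self.s2, dim=1)
        ys = [subsets[0]]                                         # y_{i,1} = x'_{i,1}
        for j in range(1, self.s2):
            ys.append(self.convs[j - 1](subsets[j] + ys[-1]))     # Eq. (3)
        return x + torch.cat(ys, dim=1)                           # residual connection

class Nes2Net(nn.Module):
    """Outer layer (Eq. 1): the SSL features are split directly, no DR layer."""
    def __init__(self, channels: int = 1024, s1: int = 8, s2: int = 8):
        super().__init__()
        self.s1 = s1
        width = channels // s1
        self.nested = nn.ModuleList(NestedLayer(width, s2) for _ in range(s1 - 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:           # x: (B, 1024, T)
        xs = torch.chunk(x, self.s1, dim=1)
        ys = [xs[0]]                                              # y_1 = x_1
        ys.append(self.nested[0](xs[1]))                          # y_2 = K_2(x_2)
        for i in range(2, self.s1):
            ys.append(self.nested[i - 1](xs[i] + ys[-1]))         # y_i = K_i(x_i + y_{i-1})
        return torch.cat(ys, dim=1)                               # fused multi-scale features

feats = torch.randn(2, 1024, 199)    # stand-in for SSL features (B, N, frames)
print(Nes2Net()(feats).shape)        # torch.Size([2, 1024, 199])
```

The Nes2Net-X variant introduced next keeps this structure but replaces the inner addition with concatenation followed by a learnable weighted sum (Eq. 4).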
To further enhance the representational capacity of Nes2Net, we propose an improved variant named Nes2Net-X. It replaces the original addition operation in the nested layer with a concatenation followed by a learnable weighted summation. This design explicitly preserves feature subset individuality before fusion and employs learnable weights to adaptively combine these subsets. The Nes2Net-X consists of the following components:" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.574, + 0.922, + 0.663 + ], + "angle": 0, + "content": "Feature Splitting and Processing: This component is the same as that in Nes2Net nested layer. The input feature \\( x_{i}^{\\prime} \\) is equally split into \\( s_2 \\) subsets along the channel dimension, denoted by \\( x_{i,j}^{\\prime} \\), where \\( j \\in \\{1, 2, \\dots, s_2\\} \\). Each subset \\( x_{i,j}^{\\prime} \\) undergoes a convolutional operation to extract feature representations." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.664, + 0.922, + 0.739 + ], + "angle": 0, + "content": "Feature Concatenation: The outputs of the convolutional layers are denoted as \\( z_{i,j} \\). In Nes2Net-X, instead of summing the processed features as in the Nes2Net, each current subset \\( x_{i,j}^{\\prime} \\) is concatenated with the previous output \\( z_{i,j-1} \\) along a newly introduced dimension before being processed." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.739, + 0.922, + 0.874 + ], + "angle": 0, + "content": "Weighted Sum: The additional dimension created during concatenation is merged back into the original feature space using a 'weighted sum' operation. This operation enables the model to dynamically assign importance to each subset, enhancing feature representation. For each subset, the 'weighted sum' is applied to the output feature \\( z_{i,j} \\) of the convolutional layer. Let \\( w_{i,j} \\) denote the learnable weights assigned to each concatenated feature. The output \\( y_{i,j} \\) of the 'weighted sum' is computed as:" + }, + { + "type": "equation", + "bbox": [ + 0.633, + 0.873, + 0.921, + 0.911 + ], + "angle": 0, + "content": "\\[\ny _ {i, j} = \\sum_ {k = 1} ^ {s} w _ {i, j, k} \\cdot z _ {i, j, k} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.915, + 0.922, + 0.947 + ], + "angle": 0, + "content": "where \\(s\\) denotes the number of subsets, \\(w_{i,j,k}\\) represents the weight for the \\(k\\)-th subset features \\(z_{i,j,k}\\)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "6" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.07, + 0.492, + 0.1 + ], + "angle": 0, + "content": "The weighted summation provides more flexible and effective feature integration, offering several advantages:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.102, + 0.493, + 0.147 + ], + "angle": 0, + "content": "- Enhanced Feature Diversity: By concatenating features across subsets, the network captures a richer set of features, encompassing various aspects of the input data." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.147, + 0.493, + 0.222 + ], + "angle": 0, + "content": "- Learnable Feature Fusion: The introduction of learnable weights \\( w \\) enables the model to prioritize more informative features, effectively suppressing less relevant ones. 
This adaptive mechanism allows the network to focus on the most discriminative features for the task." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.223, + 0.493, + 0.298 + ], + "angle": 0, + "content": "- Improved Gradient Flow: By combining concatenation with weighted summation, the model facilitates better gradient propagation during training. This helps address potential issues such as vanishing or exploding gradients, leading to more stable and efficient learning." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.102, + 0.493, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.3, + 0.492, + 0.346 + ], + "angle": 0, + "content": "These modifications enable Nes2Net-X to retain the strengths of Nes2Net while introducing greater flexibility in feature fusion, ultimately improving performance." + }, + { + "type": "title", + "bbox": [ + 0.182, + 0.359, + 0.386, + 0.372 + ], + "angle": 0, + "content": "IV. EXPERIMENTAL SETUPS" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.378, + 0.161, + 0.391 + ], + "angle": 0, + "content": "A. Datasets" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.41, + 0.458, + 0.434 + ], + "angle": 0, + "content": "TABLE II AN SUMMARY OF THE DATASETS USED IN OUR EXPERIMENTS." + }, + { + "type": "table", + "bbox": [ + 0.076, + 0.442, + 0.49, + 0.569 + ], + "angle": 0, + "content": "
| Dataset | Spoofing Type | Train | Valid | Test |
|---|---|---|---|---|
| CtrSVDD w/o ACESinger bona fide [46] | Singing Voice | 84,404 | 43,625 | 64,734 |
| CtrSVDD w/ ACESinger bona fide [46] | Singing Voice | 84,404 | 43,625 | 67,579 |
| ASVspoof 2019 [47] | Speech | 25,380 | 24,844 | - |
| ASVspoof 2021 LA [48] | Speech | - | - | 181,566 |
| ASVspoof 2021 DF [48] | Speech | - | - | 611,829 |
| ASVspoof 5 [49] | Speech | 182,357 | 140,950 | 680,774 |
| In-the-Wild [50] | Speech | - | - | 31,779 |
| PartialSpoof [51] | Partial Spoof | 25,380 | 24,844 | 71,237 |
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.583, + 0.493, + 0.947 + ], + "angle": 0, + "content": "We use five datasets across various scenarios, including singing voice deepfake, fully spoofed speech, adversarial attacks, and partially spoofed speech, to evaluate the performance of the proposed model. Singing voice deepfake detection (SVDD) is a growing area of interest in the research community [46], [52], [53]. The CtrlSVDD dataset [46], [52] offers structured attack types and official evaluation protocols, making it suitable for systematic architecture exploration. As a newly collected resource, it captures recent spoofing techniques, providing a more challenging and relevant benchmark for modern anti-spoofing systems. We therefore adopt it as a representative example. Moreover, fully spoofed speech is the most studied category. In this work, we include two categories of datasets: (1) the ASVspoof series, which comprises ASVspoof 2019 [47], ASVspoof 2021 Logical Access (LA), ASVspoof 2021 Deepfake (DF) [48], and ASVspoof 5 [49]; and (2) the In-the-Wild dataset [50], which reflects real-world usage scenarios. Partially spoofed speech alters only part of an utterance to convey deceptive meaning. This emerging challenge has attracted growing attention. We use the PartialSpoof [51] dataset as a representative benchmark. Table II summarizes the datasets used in this study. Models are trained on the training set and validated on the validation set to select the best checkpoint for testing." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.07, + 0.923, + 0.237 + ], + "angle": 0, + "content": "For CtrlSVDD [46], we report results on two official test protocols, according to whether ACESinger bona fide samples are included. The 'A14' attack type of the CtrlSVDD dataset is excluded following the official guidelines [46]. ASVspoof 2019 [47] is used only for training and validation, while the In-the-Wild [50], ASVspoof 2021 LA and DF [48] datasets are used only for testing. For the recently released ASVspoof 5 dataset [49], we use its train, development, and evaluation partitions for model training, validation, and testing, respectively. For PartialSpoof [51], we follow the standard partitioning into train, development, and evaluation sets." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.254, + 0.661, + 0.269 + ], + "angle": 0, + "content": "B. Training Strategies" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.272, + 0.923, + 0.378 + ], + "angle": 0, + "content": "Each experiment is run three times using different random seeds. We report both the result from the best-performing run and the average performance across all runs. The values of \\( s_1 \\) and \\( s_2 \\) are both set to 8 for Nes2Net and Nes2Net-X. The baseline systems for each dataset are built using SOTA models, and our proposed model adopts similar training strategies. The details are as follows:" + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.391, + 0.912, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.505, + 0.476, + 0.861, + 0.489 + ], + "angle": 0, + "content": "Fig. 2. The cyclic learning rate schedule using cosine annealing." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.503, + 0.923, + 0.639 + ], + "angle": 0, + "content": "CtSVDD: For the models trained on the CtrSVDD dataset [46], [52], we follow the baseline system from \\([16]^1\\). Following the setting in [16], we use a random seed of 42 to ensure reproducibility. 
Furthermore, due to the inherent stochasticity in deep learning, repeated runs are necessary to obtain reliable average results. We use the AdamW optimizer with batch size 34, an initial learning rate of \\(1 \\times 10^{-6}\\), and weight decay of \\(1 \\times 10^{-4}\\). The learning rate is scheduled using cosine annealing with a cycle to a minimum of \\(1 \\times 10^{-9}\\)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.64, + 0.923, + 0.819 + ], + "angle": 0, + "content": "As shown in Fig. 2, over 75 training epochs, we select checkpoints from the epoch with the minimum learning rate, as well as its preceding and following epochs, for validation. The best validation result is then used for testing. We use binary focal loss [54], a generalization of binary cross-entropy loss, with a focusing parameter \\((\\gamma)\\) of 2 and a positive class weight \\((\\alpha)\\) of 0.25. To standardize input length, each sample is randomly cropped or padded to 4 seconds during training. We adopt the Rawboost 'parallel: \\((1)+(2)\\)' data augmentation strategy [55], as explored in [16]. WavLM is used as the frontend model for this dataset. The pre-trained and implementation of WavLM are obtained from S3PRL2." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.82, + 0.922, + 0.897 + ], + "angle": 0, + "content": "ASVspoof 2019 & 2021: For the models trained on the ASVspoof 2019 [47] dataset, we follow the baseline system proposed in \\([15]^3\\). Audio data are cropped or concatenated to create segments of approximately 4 seconds in duration (64,600 samples) for both training and testing. We use the" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.906, + 0.754, + 0.919 + ], + "angle": 0, + "content": "\\(^{1}\\)https://github.com/Anmol2059/SVDD2024" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.919, + 0.684, + 0.932 + ], + "angle": 0, + "content": "\\(^{2}\\)https://github.com/s3prl/s3prl" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.932, + 0.797, + 0.945 + ], + "angle": 0, + "content": "3https://github.com/TakHemlata/SSL_Anti-spoofing" + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.906, + 0.797, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.078, + 0.03, + 0.452, + 0.041 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.04 + ], + "angle": 0, + "content": "7" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.071, + 0.923, + 0.131 + ], + "angle": 0, + "content": "TABLE III PERFORMANCE IN EER \\((\\%)\\) ON THE CTRSVDD EVALUATION SET [46] WITH WAVLM [3] FRONT-END. RESULTS ARE SHOWN AS 'BEST (MEAN)' OVER 3 RUNS. PARAMETERS. AND MMACs REFER TO NUMBER OF PARAMETERS AND MILLION MULTIPLY-ACCUMULATE OPERATIONS, RESPECTIVELY. W/O AND W/ ACE B.F. REFER TO 'WITHOUT' AND 'WITH' ACESINGER BONA FIDE SAMPLES, RESPECTIVELY. ATTACK-SPECIFIC EERS ARE COMPUTED UNDER THE 'W/O ACE B.F' CONDITION. BEST RESULTS ARE IN BOLD; SECOND-BEST ARE UNDERlined.' \\(\\dagger\\) DENOTES IMPLEMENTATION CONDUCTED BY US." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.135, + 0.93, + 0.33 + ], + "angle": 0, + "content": "
| Back-end | Params. | MMACs | A9 | A10 | A11 | A12 | A13 | w/o ACE. B.F. | w/ ACE. B.F. |
|---|---|---|---|---|---|---|---|---|---|
| XWSB [39] ※ | - | - | - | - | - | - | - | - | 2.32 |
| SLS [39] | - | - | - | - | - | - | - | - | 2.59 |
| AASIST (C=32) [16] | 447k | 707.65 | - | - | - | - | - | - | 2.70 |
| AASIST Light (C=24) † | 159k | 91.35 | 1.27 (1.37) | 0.87 (1.00) | 5.44 (5.86) | 4.84 (5.65) | 0.98 (1.05) | 3.95 (4.35) | 3.41 (3.77) |
| AASIST Standard (C=32) † | 447k | 707.65 | 1.18 (1.28) | 0.73 (0.86) | 3.63 (3.86) | 5.65 (5.77) | 0.88 (1.00) | 3.30 (3.36) | 2.79 (2.89) |
| AASIST Large (C=40) † | 662k | 1,091.28 | 1.32 (1.37) | 0.87 (0.97) | 3.70 (3.96) | 5.04 (5.63) | 0.96 (1.06) | 3.19 (3.36) | 2.71 (2.94) |
| AASIST XL (C=48) † | 835k | 1,555.56 | 1.23 (1.36) | 0.76 (0.92) | 3.40 (4.64) | 4.93 (5.55) | 0.89 (1.06) | 3.12 (3.62) | 2.76 (3.18) |
| AASIST XXL (C=56) † | 1,087k | 2,104.57 | 0.96 (1.20) | 0.66 (0.84) | 3.86 (4.15) | 4.83 (5.43) | 0.75 (0.95) | 3.05 (3.43) | 2.65 (2.95) |
| ResNet † | 611k | 70.62 | 1.18 (1.21) | 0.80 (0.93) | 3.97 (5.06) | 4.60 (4.86) | 0.96 (1.03) | 3.11 (3.61) | 2.74 (3.17) |
| Res2Net † | 452k | 64.93 | 1.26 (1.37) | 0.83 (0.86) | 3.59 (4.08) | 4.45 (4.80) | 1.08 (1.09) | 3.02 (3.24) | 2.61 (2.78) |
| ECAPA-TDNN (C=128) † | 497k | 80.21 | 1.18 (1.39) | 0.67 (0.85) | 4.47 (5.84) | 4.63 (4.96) | 0.87 (1.04) | 3.19 (3.74) | 2.79 (3.30) |
| Proposed Nes2Net | 511k | 58.11 | 1.23 (1.34) | 0.76 (0.81) | 2.40 (2.43) | 5.00 (5.24) | 0.96 (0.99) | 2.53 (2.55) | 2.22 (2.27) |
| Proposed Nes2Net-X | 511k | 91.35 | 1.21 (1.23) | 0.63 (0.76) | 2.09 (2.32) | 4.99 (5.24) | 0.83 (0.92) | 2.48 (2.51) | 2.20 (2.24) |
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.078, + 0.331, + 0.873, + 0.341 + ], + "angle": 0, + "content": "\\(\\text{※}\\) XWSB is an ensemble-like model that combine two SSL front-ends [39], while all other models in Table III are based on single SSL front-end." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.359, + 0.49, + 0.554 + ], + "angle": 0, + "content": "Adam optimizer [56] with a weight decay of \\(1 \\times 10^{-4}\\). To reproduce the AASIST baseline [15], we reduce the original batch size from 14 to 8 due to GPU memory constraints, and halve the learning rate from \\(1 \\times 10^{-6}\\) to \\(5 \\times 10^{-7}\\). For Nes2Net, benefiting from its lower GPU memory consumption, we use a batch size of 12 with a learning rate of \\(2.5 \\times 10^{-7}\\). The loss function used is weighted Cross Entropy. Following [15], we apply Rawboost augmentations [55], specifically 'series: \\((1 + 2 + 3)\\)' (Algo4) and 'series: \\((1 + 2)\\)' (Algo5), for AASIST baselines. For the proposed Nes2Net-X, only the former augmentation is applied. All models are trained for 100 epochs and the best checkpoint on the validation set is used for testing on the ASVspoof 2021 [48] and In-the-Wild [50] datasets." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.555, + 0.49, + 0.659 + ], + "angle": 0, + "content": "ASVspoof 5: Both our AASIST baseline and the proposed Nes2Net-X models are trained using settings similar to those used for AASIST in the ASVspoof 2019 corpus. However, several differences apply. The final learning rate is set to \\(1 \\times 10^{-7}\\), we apply data augmentation using MUSAN [57] and RIR [58], and training is stopped if there is no improvement on the development set for 5 consecutive epochs." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.66, + 0.49, + 0.856 + ], + "angle": 0, + "content": "**PartialProof:** For models trained on the PartialSpoof [51], we follow the baseline systems described in [51], \\([59]^4\\). Specifically, we use wav2vec 2.0 as the front-end, the MSE for P2SGrad [60] as the loss function, and Adam [56] as the optimizer. Following [59], the batch size is set to 2, and a learning rate of \\(2.5 \\times 10^{-6}\\) is adopted for the baseline systems. For the proposed Nes2Net and Nes2Net-X, the learning rate is set to \\(1 \\times 10^{-5}\\). The pooling layer used for the proposed Nes2Net and Nes2Net-X is the Attentive Statistics Pooling [42], and the reduction ratio of SE module is set to 8. Training is terminated if no improvement is observed on the development set for 20 consecutive epochs. The epoch yielding the best performance on the development set is used for testing." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.873, + 0.383, + 0.884 + ], + "angle": 0, + "content": "V. RESULTS AND ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.891, + 0.49, + 0.92 + ], + "angle": 0, + "content": "All Equal Error Rate (EER) results in this work are reported as 'best (mean)' over multiple runs. For cited results that (1)" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.36, + 0.92, + 0.388 + ], + "angle": 0, + "content": "are based on a single run, (2) report only the best result, or (3) lack sufficient details, only a single value is presented." + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.412, + 0.749, + 0.426 + ], + "angle": 0, + "content": "A. 
Studies on the CtrSVDD dataset" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.431, + 0.92, + 0.718 + ], + "angle": 0, + "content": "We conduct experiments on the CtrSVDD dataset [46], following two testing protocols: one including ACESinger bona fide samples and the other excluding them [38]. While results for both protocols are reported in Table III, our primary analysis focuses on the scenario 'without ACESinger bona fide (w/o ACE. B.F.)', as recommended by the dataset creators. Since AASIST \\((\\mathrm{C} = 32)\\) in our prior work [16], as well as SLS and XWSB [39], were evaluated during the CtrSVDD Challenge 2024, portions of their test sets differ from the current official protocol. As a result, the EER by attack type is not directly comparable. To ensure a fair comparison, we re-implemented the AASIST \\((\\mathrm{C} = 32)\\) system under the official protocol and used it as our baseline, referred to as AASIST Standard \\((\\mathrm{C} = 32)\\) in Table III, achieving an EER of \\(2.79\\%\\) which is close to the originally reported \\(2.70\\%\\) [16]. Under the 'w/o ACE B.F.' condition, the best run achieves an EER of \\(3.30\\%\\) with an average of \\(3.36\\%\\) across three runs. Further experiments show that scaling up the AASIST model does not improve mean EER, possibly due to parameter redundancy." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.719, + 0.92, + 0.944 + ], + "angle": 0, + "content": "We additionally evaluate several widely-used baseline systems, including ResNet [19], Res2Net [18], and ECAPATDNN [34]. ECAPA-TDNN and ResNet achieve EERs of \\(3.74\\%\\) and \\(3.61\\%\\), respectively, which are slightly worse than that of AASIST. In contrast, Res2Net benefits from the advantages of multi-scale feature extraction, delivering the best average performance among the baseline systems with an EER of \\(3.24\\%\\). Our proposed Nes2Net outperforms all baseline systems, achieving a mean EER of \\(2.55\\%\\) with the lowest computational cost. Furthermore, the enhanced version, Nes2Net-X, further improves the performance to \\(2.51\\%\\) EER, marking the best single-model performance reported to date. Compared to Res2Net, ResNet, ECAPA-TDNN, and SOTA AASIST (\\(C = 32\\)), Nes2Net-X achieves EER reductions of \\(23\\%\\), \\(30\\%\\), \\(33\\%\\), and \\(25\\%\\), respectively." + }, + { + "type": "footer", + "bbox": [ + 0.091, + 0.932, + 0.355, + 0.944 + ], + "angle": 0, + "content": "4https://github.com/nii-yamagishilab/PartialSpoof" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "8" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.072, + 0.921, + 0.119 + ], + "angle": 0, + "content": "TABLE IV PERFORMANCE IN EER \\((\\%)\\) ON THE CTRSVDD EVALUATION SET [46], COMPARING THE PROPOSED NES2NET WITH RES2NET AND ITS VARIOUS VARIANTS. THE RESULTS ARE REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS, E.G., 3.02 (3.24) IN THE FIRST ROW, OR AS THE RESULT OF A SINGLE EXPERIMENT, E.G., 3.21 IN THE SECOND ROW. 'B' AND 'S' REPRESENT THE NUMBER OF BLOCKS AND SCALE OF RES2NET, RESPECTIVELY." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.128, + 0.917, + 0.372 + ], + "angle": 0, + "content": "
| Back-end | DR Layer | Reduced Dim. D | Params. | MMACs | EER w/o ACE. B.F. | EER w/ ACE. B.F. | Remarks |
|---|---|---|---|---|---|---|---|
| Res2Net (b=4, s=4) | ✓ | 128 | 452k | 64.93 | 3.02 (3.24) | 2.61 (2.78) | |
| Res2Net (b=4, s=16) | ✓ | 128 | 427k | 59.95 | 3.21 | 2.80 | increase scale s |
| Res2Net (b=4, s=64) | ✓ | 128 | 419k | 58.28 | 3.15 | 2.74 | |
| Res2Net (b=4, s=128) | ✓ | 128 | 417k | 57.98 | 3.26 | 2.88 | |
| Res2Net (b=4, s=4) | ✓ | 64 | 180k | 23.25 | 4.32 | 3.76 | change D |
| Res2Net (b=4, s=4) | ✓ | 256 | 1,273k | 202.91 | 3.83 | 3.38 | |
| Res2Net-woDR (b=1, s=4) | × | - | 861k | 119.15 | 4.15 | 3.62 | remove dimensionality reduction layer and increase scale s |
| Res2Net-woDR (b=1, s=8) | × | - | 615k | 70.12 | 4.23 | 3.71 | |
| Res2Net-woDR (b=1, s=16) | × | - | 456k | 38.24 | 3.82 | 3.35 | |
| Res2Net-woDR (b=1, s=32) | × | - | 367k | 20.45 | 2.98 (3.45) | 2.56 (3.02) | |
| Res2Net-woDR (b=1, s=64) | × | - | 320k | 11.10 | 2.73 (2.97) | 2.42 (2.61) | |
| Res2Net-woDR (b=1, s=128) | × | - | 296k | 6.31 | 3.29 | 2.88 | |
| Res2Net-woDR (b=1, s=256) | × | - | 284k | 3.88 | 3.57 | 3.13 | |
| Res2Net-woDR (b=2, s=64) | × | - | 637k | 21.78 | 3.20 | 2.82 | increase depth |
| Res2Net-woDR (b=4, s=64) | × | - | 1,270k | 43.15 | 3.09 (3.18) | 2.73 (2.83) | |
| Proposed Nes2Net | × | - | 511k | 58.11 | 2.53 (2.55) | 2.22 (2.27) | proposed nested design |
| Proposed Nes2Net-X | × | - | 511k | 91.35 | 2.48 (2.51) | 2.20 (2.24) | |
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.387, + 0.492, + 0.508 + ], + "angle": 0, + "content": "We also analyze performance across different synthetic attack types using the 'w/o ACE B.F.' protocol. Except for the 'A12' attack type [46], our model consistently achieves either the best or second-best performance, demonstrating strong generalization and robustness. Notably, the 'A12' attack type, based on Singing Voice Synthesis (SVS), proves particularly challenging, showing higher EER across all models and highlighting a potential area for future improvement." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.508, + 0.492, + 0.614 + ], + "angle": 0, + "content": "We observe that performance trends are consistent across both conditions, with and without ACESinger bona fide samples. Moreover, the EER is lower when ACESinger bona fide samples are included. This indicates that, even though ACESinger bona fide samples are considered out-of-domain, the trained models exhibit strong generalization capabilities and are able to classify these samples accurately." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.626, + 0.303, + 0.641 + ], + "angle": 0, + "content": "B. The Roadmap of the Nes2Net" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.644, + 0.491, + 0.764 + ], + "angle": 0, + "content": "In this section, we introduce the roadmap from Res2Net to the proposed Nes2Net, with detailed results summarized in Table IV. All systems are implemented and evaluated under a unified framework for fair comparison. To aid interpretation, we visualize the number of parameters, MACs, and EER. These are represented in Fig. 3 by circle size, the horizontal axis, and the vertical axis, respectively. In the following, we provide detailed analyses:" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.765, + 0.492, + 0.915 + ], + "angle": 0, + "content": "Investigating Res2Net: Among the baselines in Table III, the Res2Net-based back-end outperforms ResNet, AASIST, and ECAPA-TDNN on the CtrlSVDD dataset. Therefore, we select it as the reference baseline for further investigation. First, we experiment with adjusting the scale \\( s \\) of Res2Net. We observe that as \\( s \\) increases, the number of split groups increases linearly; however, the performance shows no significant improvement (depicted as the teal blue line in Fig. 3). This may be because adding too many split groups dilutes the feature representation, leading to redundancy." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.492, + 0.946 + ], + "angle": 0, + "content": "Next, we explore varying the dimensionality of the output features from the DR layer (referred to as Reduced Dimension" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.386, + 0.922, + 0.753 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.757, + 0.922, + 0.783 + ], + "angle": 0, + "content": "Fig. 3. Visualization of Table III and IV, highlighting our exploration of Res2Net and the roadmap of architectural changes leading to Nes2Net." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.794, + 0.922, + 0.93 + ], + "angle": 0, + "content": "\\(D\\), depicted as the steel gray line in Fig. 3). Reducing \\(D\\) to 64 significantly lowers model size and MACs, compared to the default \\(D = 128\\), but leads to substantial performance degradation, increasing EER from \\(3.02\\%\\) to \\(4.32\\%\\). 
Conversely, increasing \\(D\\) to 256 results in a much larger model size and MACs but still leads to worse performance than \\(D = 128\\). This may be because a larger \\(D\\) introduces over-parameterization and noise. This may explain why \\(D = 128\\) is commonly adopted in SOTA models [15], [16]." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.93, + 0.922, + 0.946 + ], + "angle": 0, + "content": "Removal of DR Layer: Foundation models often incorpo" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.19 + ], + "angle": 0, + "content": "rate a DR layer in their back-end architecture to compress high-dimensional features into lower-dimensional representations, facilitating downstream tasks. For instance, models like wav2vec 2.0-AASIST [15] utilize such a layer alongside task-specific classifiers (e.g., AASIST, ResNet). However, as discussed in Section II-E, this projection layer consumes a substantial portion of the back-end model's parameters and MACs while potentially causing information loss." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.191, + 0.493, + 0.312 + ], + "angle": 0, + "content": "To explore whether bypassing this layer preserves more task-relevant information, we propose a new back-end model: ResNet without Dimensionality Reduction (ResNet-woDR). By directly processing high-dimensional features, ResNet-woDR simplifies the architecture and focuses on the raw features extracted by the speech foundation model. The naming emphasizes the absence of a DR layer, differentiating it from traditional approaches." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.313, + 0.493, + 0.405 + ], + "angle": 0, + "content": "We further evaluate the performance of ResNet-woDR with different scales \\( s \\) (depicted as the green line in Fig. 3). The best performance is observed with \\( s = 64 \\), achieving a mean EER of \\( 2.97\\% \\), which surpasses the best Res2Net baseline. Increasing \\( s \\) beyond this point leads to a decline in performance, likely due to the following factors:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.41, + 0.493, + 0.455 + ], + "angle": 0, + "content": "- Feature Dilution. A large \\(s\\) excessively fragments feature representations, weakening their expressiveness and resulting in diluted, less informative features [18]." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.455, + 0.493, + 0.5 + ], + "angle": 0, + "content": "- Redundant Transformations. An overly large \\(s\\) introduces unnecessary feature transformations, leading to overfitting and reduced generalization [43]." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.5, + 0.493, + 0.576 + ], + "angle": 0, + "content": "- Restricted Feature Interaction. Since channels are unordered, distant groups may still contain correlated information. In this case, the additional convolutional layers introduced by splitting limit their interactions, weakening the model's ability to capture complex patterns." 
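A rough calculation makes the dilution point concrete. Assuming 1D convolutions with kernel size 3 and ignoring SE modules, biases, and the scoring head (a sketch, not an exact reproduction of the Res2Net-woDR layer layout), both the subset width and the per-scale convolution parameters collapse as \( s \) grows, consistent with the shrinking parameter counts in Table IV:

```python
# Feature fragmentation in Res2Net-woDR: with N = 1024 channels, each of
# the s subsets keeps only N/s channels, and the (s - 1) width-to-width
# convolutions shrink quadratically with that width. Rough sketch only.
N, k = 1024, 3
for s in (4, 16, 64, 256):
    width = N // s                               # channels per subset
    conv_params = (s - 1) * width * width * k    # 3-tap convs across scales
    print(f"s={s:>3}: {width:>4} channels/subset, ~{conv_params / 1e3:6.1f}k conv params")
```

At \( s = 256 \) each subset retains only four channels, illustrating why overly fine splits dilute the representation even as the model shrinks.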
+ }, + { + "type": "list", + "bbox": [ + 0.091, + 0.41, + 0.493, + 0.576 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.58, + 0.493, + 0.686 + ], + "angle": 0, + "content": "Based on the optimal \\( s \\), we increase the number of blocks \\( b \\) to deepen the model (depicted as the light pink line in Fig. 3). However, no further performance improvement is observed. This could be attributed to the deeper architecture's limited ability to effectively utilize the additional parameters, resulting in diminishing performance gains. It may also increase the risk of overfitting." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.687, + 0.493, + 0.793 + ], + "angle": 0, + "content": "The Novel Nested Design: Prior experiments demonstrate that removing the DR layer enhances the performance of Res2Net. We believe that directly extracting information from high-dimensional speech foundation model features avoids the information loss introduced by DR. Our experiments with variations in scale, depth, and dimensionality show that a mean EER of \\(2.97\\%\\) marks a performance bottleneck for this design." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.794, + 0.493, + 0.945 + ], + "angle": 0, + "content": "Compared to ResNet-woDR, the proposed Nes2Net adopts a novel nested design that enhances flexibility and significantly boosts the model's representational capacity. Processing larger feature subsets in the outer layer facilitates better interactions across channels within each nested layer. Furthermore, the integrated local cross-channel attention mechanism enhances feature selection while mitigating redundancy, addressing limitations in prior designs. This architectural refinement overcomes the performance limitations observed in the original Res2Net design. As a result, Nes2Net and its enhanced variant" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.069, + 0.922, + 0.101 + ], + "angle": 0, + "content": "Nes2Net-X surpass the earlier performance bottleneck, achieving mean EERs of \\(2.55\\%\\) and \\(2.51\\%\\), respectively." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.118, + 0.792, + 0.133 + ], + "angle": 0, + "content": "C. Studies on the ASVspoof 2021 dataset" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.139, + 0.92, + 0.243 + ], + "angle": 0, + "content": "TABLE V PERFORMANCE IN EER \\((\\%)\\) ON THE ASVspoof 2021 LA AND DF. THE RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN). CKPT AVG. REFERS TO THE NUMBER OF CHECKPOINTS AVERAGED. \\(\\ddagger\\) DENOTES RE-IMPLEMENTATION CONDUCTED BY US. 'ALGO4' AND 'ALGO5' REPRESENT RAWBOOST SERIES AUGMENTATIONS: \\((1 + 2 + 3)\\) AND \\((1 + 2)\\) [55], RESPECTIVELY. PARAMETERS THAT ARE UNDERlined ARE CALCULATED BY US. \\(-\\) REPRESENTS UNKNOWN. N/A INDICATES THAT THE SYSTEM DOES NOT USE THE AVERAGE CHECKPOINTS METHOD." + }, + { + "type": "table", + "bbox": [ + 0.506, + 0.247, + 0.922, + 0.59 + ], + "angle": 0, + "content": "
| Remark | Front-end | Back-end Model | Back-end Params. | CKPT Avg. | ASVspoof 2021 LA | ASVspoof 2021 DF |
|---|---|---|---|---|---|---|
| 2022 | wav2vec2.0 | FIR-NB [61] | - | - | 3.54 | 6.18 |
| 2022 | wav2vec2.0 | FIR-WB [61] | - | - | 7.08 | 4.98 |
| 2022 | wav2vec2.0 | LGF [62] | - | - | 9.66 | 4.75 |
| 2023 | wav2vec2.0 | Conformer (fix) [63]^5 | 2,506k | 5 | 1.38 | 2.27 |
| 2023 | wav2vec2.0 | Conformer (var) [63] | 2,506k | 5 | 0.87 | 7.36 |
| 2024 | wav2vec2.0 | Ensembling [64] ‡ | - | - | 2.32 (4.48) | 5.60 (8.74) |
| 2024 | WavLM | ASP+MLP [65] | 1,051k | - | 3.31 | 4.47 |
| 2024 | wav2vec2.0 | SLIM [14] | - | - | - | (4.4) |
| 2024 | WavLM | AttM-LSTM [31]^6 | 936k | N/A | 3.50 | 3.19 |
| 2024 | wav2vec2.0 | FTDKD [66] | - | - | 2.96 | 2.82 |
| 2024 | wav2vec2.0 | AASIST2 [67] | - | - | 1.61 | 2.77 |
| 2024 | wav2vec2.0 | MFA [68] | - | - | 5.08 | 2.56 |
| 2024 | wav2vec2.0 | MoE [69] | - | - | 2.96 | 2.54 |
| 2024 | wav2vec2.0 | OCKD [70] | - | - | 0.90 | 2.27 |
| 2024 | wav2vec2.0 | TCM [33]^7 | 2,383k | 5 | 1.03 | 2.06 |
| 2024 | wav2vec2.0 | SLS [35]^8 | 23,399k | - | 2.87 (3.88) | 1.92 (2.09) |
| 2025 | wav2vec2.0 | LSR+LSA [71] | - | - | 1.19 | 2.43 |
| 2025 | wav2vec2.0 | LSR+LSA [71] ※ | - | - | 1.05 | 1.86 |
| 2025 | wav2vec2.0 | WaveSpec [72] | - | - | - | 1.90 |
| 2025 | wav2vec2.0 | Mamba [17]^9 | 1,937k | 5 | 0.93 | 1.88 |
| 2025 | wav2vec2.0 | SSL-EOW-S. [73] ‡ | - | - | - | 1.75 (2.91) |
| 2025 | wav2vec2.0 | Cal. Ensemble [73] ‡ | - | - | - | (2.03) |
| 2022 | wav2vec2.0 | AASIST [15]^10 | 447k | N/A | 0.82 (1.00) | 2.85 (3.69) |
| | wav2vec2.0 | AASIST (algo4) | 447k | N/A | 1.13 (1.36) | 3.37 (4.09) |
| | wav2vec2.0 | AASIST (algo5) | 447k | N/A | 0.93 (1.40) | 3.56 (5.07) |
| Ours | wav2vec2.0 | Nes2Net | 511k | N/A | 1.61 (1.90) | 1.89 (2.12) |
| | wav2vec2.0 | Nes2Net-X | 511k | N/A | 1.73 (1.95) | 1.65 (1.91) |
| | wav2vec2.0 | Nes2Net-X | 511k | 3 | 1.66 (1.87) | 1.54 (1.98) |
| | wav2vec2.0 | Nes2Net-X | 511k | 5 | 1.88 (2.00) | 1.49 (1.78) |
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.507, + 0.589, + 0.685, + 0.609 + ], + "angle": 0, + "content": "\\*: with extra data augmentation [71] \\(\\ddagger\\) : ensemble of multiple models" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.617, + 0.922, + 0.812 + ], + "angle": 0, + "content": "The ASVspoof series datasets are widely used as benchmarks for advancing research in detecting spoofed speech [47], [48]. Following the standard protocol, we train models on ASVspoof 2019 [47] and evaluate them on ASVspoof 2021 Logical Access (LA) and Deepfake (DF) tasks [48]. The LA task focuses on detecting synthetic and voice-converted speech transmitted over telephony systems, introducing challenges related to channel effects and transmission variability. In contrast, the DF task targets detecting manipulated, compressed speech data commonly found on online platforms. This reflects real-world scenarios where deepfake audio circulates, making the DF task a valuable benchmark for evaluating deepfake detection systems." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.813, + 0.922, + 0.859 + ], + "angle": 0, + "content": "The results in Table V show that for the LA track, our Nes2Net achieves a mean EER of \\(1.90\\%\\), comparable to SOTA systems. For the DF track, which more closely reflects" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.868, + 0.911, + 0.882 + ], + "angle": 0, + "content": "\\(^{5}\\)https://github.com/ErosRos/conformer-based-classifier-for-anti-spoofing" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.882, + 0.837, + 0.894 + ], + "angle": 0, + "content": "\\(^{6}\\)https://github.com/pandartialdTJU/AttM_INTERSPEECH24" + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.895, + 0.751, + 0.907 + ], + "angle": 0, + "content": "7https://github.com/ductuantruong/tcm_add" + }, + { + "type": "ref_text", + "bbox": [ + 0.52, + 0.907, + 0.849, + 0.919 + ], + "angle": 0, + "content": "\\(^{8}\\)https://github.com/QiShanZhang/SLSforASVspoof-2021-DF" + }, + { + "type": "ref_text", + "bbox": [ + 0.52, + 0.919, + 0.763, + 0.932 + ], + "angle": 0, + "content": "9https://github.com/swagshaw/XLSR-Mamba" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.932, + 0.802, + 0.945 + ], + "angle": 0, + "content": "\\(^{10}\\)https://github.com/TakHemlata/SSL_Anti-spoofing" + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.868, + 0.911, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.922, + 0.041 + ], + "angle": 0, + "content": "10" + }, + { + "type": "table_caption", + "bbox": [ + 0.079, + 0.072, + 0.92, + 0.119 + ], + "angle": 0, + "content": "TABLE VI PERFORMANCE IN EER \\((\\%)\\) FOR DIFFERENT TYPES OF VOCODERS AND COMPRESSION CONDITIONS ON THE ASVSPOOF 2021 DF TEST SET. THE FIVE EER VALUES FOR EACH SUB-ITEM, FROM LEFT TO RIGHT, CORRESPOND TO NES2NET-X, MAMBA [17], SLS [35], TCM [33], AND AASIST [15].THE BEST PERFORMANCE IS REPORTED IN BOLD FONTS, AND THE SECOND-BEST IS UNDERLINED." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.123, + 0.921, + 0.552 + ], + "angle": 0, + "content": "
| Cond. | Compression | Traditional Vocoder | Wav Concatenation | Neural Autoreg. | Neural Non-autoreg. | Unknown | Pooled EER |
|---|---|---|---|---|---|---|---|
| C1 | - | 0.36/0.78/1.21/0.95/1.22 | 0.76/0.76/0.80/0.76/2.28 | 2.70/3.88/3.12/3.89/3.45 | 0.52/0.87/0.68/0.95/1.56 | 1.64/1.63/1.23/1.73/1.99 | 1.47/1.89/1.72/2.23/2.34 |
| C2 | Low mp3 | 1.48/0.94/1.94/1.67/2.72 | 2.96/2.20/2.16/2.56/5.84 | 2.89/3.23/2.71/3.59/5.96 | 1.23/0.86/0.78/1.32/3.33 | 2.54/1.69/1.65/1.93/4.30 | 1.75/1.84/2.02/2.11/4.30 |
| C3 | High mp3 | 0.44/0.88/1.39/0.96/1.83 | 1.13/1.49/1.17/1.45/3.35 | 2.47/3.35/2.91/3.70/3.79 | 0.44/0.87/0.69/0.88/2.02 | 2.29/1.85/1.34/1.67/2.65 | 1.32/1.85/1.59/1.95/2.64 |
| C4 | Low m4a | 0.44/0.95/1.48/1.22/1.57 | 1.15/0.85/1.24/1.67/2.09 | 2.79/3.39/2.79/3.40/3.75 | 0.54/0.96/0.70/1.22/1.65 | 1.32/1.22/1.14/1.41/2.10 | 1.40/1.92/1.74/2.01/2.37 |
| C5 | High m4a | 0.45/0.80/1.34/0.98/1.16 | 0.62/0.76/0.71/0.76/2.10 | 2.77/3.48/2.96/3.73/3.39 | 0.56/0.90/0.64/1.07/1.34 | 1.88/1.70/1.34/1.43/1.87 | 1.59/2.05/1.79/1.96/2.14 |
| C6 | Low ogg | 0.69/1.13/2.14/1.44/2.35 | 0.80/0.97/0.91/0.91/2.23 | 1.92/2.80/2.44/2.79/3.67 | 0.48/0.78/0.61/0.84/1.62 | 1.05/1.14/1.00/1.01/2.23 | 1.09/1.61/1.88/1.87/2.58 |
| C7 | High ogg | 0.70/1.13/1.52/1.35/1.57 | 0.62/0.80/0.71/0.80/1.50 | 2.05/2.84/2.26/2.66/2.92 | 0.43/0.65/0.52/0.74/1.00 | 1.34/1.05/0.96/0.96/1.27 | 1.35/1.61/1.57/1.74/1.92 |
| C8 | mp3→m4a | 0.95/1.26/2.28/1.74/3.01 | 1.52/0.97/1.08/1.08/2.96 | 2.22/3.01/2.31/2.96/4.49 | 0.61/0.57/0.65/0.95/2.05 | 1.61/1.18/1.09/1.18/2.66 | 1.48/1.65/1.92/1.97/3.31 |
| C9 | ogg→m4a | 0.70/1.26/2.15/1.49/2.28 | 0.88/0.97/0.99/0.88/2.52 | 1.92/3.01/2.57/2.88/3.76 | 0.52/0.70/0.65/0.78/1.57 | 0.96/1.09/1.09/1.05/2.14 | 1.13/1.79/2.04/1.88/2.75 |
| Pooled EER | | 0.72/1.14/1.88/1.40/2.15 | 1.10/1.05/1.07/1.14/2.85 | 2.70/3.32/2.86/3.40/4.05 | 0.63/0.80/0.69/0.94/1.84 | 1.86/1.43/1.23/1.38/2.45 | 1.49/1.88/1.92/2.06/2.85 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.074, + 0.554, + 0.924, + 0.591 + ], + "angle": 0, + "content": "Fig. 4. Visualization of the EER \\((\\%)\\) across various vocoders and compression conditions on the ASVspoof 2021 DF test set. Each EER value is shown as a colored circle, where the size indicates the EER value, and the color represents the performance ranking among the five models: blue (best) to light red (worst). The five EER values for each sub-item, from left to right, correspond to the proposed Nes2Net-X, Mamba [17], SLS [35], TCM [33], and AASIST [15]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.607, + 0.492, + 0.788 + ], + "angle": 0, + "content": "real-world scenarios as discussed earlier, the baseline system AASIST [15] achieves its best EER of \\(2.85\\%\\) and a mean EER of \\(3.69\\%\\), remaining competitive with current SOTA systems. The SLS [35] and TCM [33] models achieve EERs close to \\(2\\%\\), demonstrating strong performance at the SOTA level. The Mamba-based [17] model further improves results, reducing the EER to \\(1.88\\%\\). Notably, our proposed Nes2Net attains its best EER of \\(1.89\\%\\) and a mean EER of \\(2.12\\%\\) EER, comparable to the performance of current SOTA systems. The enhanced variant, Nes2Net-X achieves the best performance among all compared systems, with its best EER of \\(1.65\\%\\) and a mean EER of \\(1.91\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.79, + 0.491, + 0.927 + ], + "angle": 0, + "content": "Inspired by prior works [17], [33], we average the weights of several top-performing checkpoints on the validation set to obtain an improved model. This approach further improves the performance of the DF task to a best EER of \\(1.49\\%\\) and a mean EER of \\(1.78\\%\\), which, to the best of our knowledge, is the best performance reported to date. Furthermore, compared to Mamba [17], our model achieves this performance with approximately \\(74\\%\\) fewer parameters, demonstrating superior efficiency." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.93, + 0.492, + 0.946 + ], + "angle": 0, + "content": "The analysis above summarizes overall performance on the" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.606, + 0.923, + 0.728 + ], + "angle": 0, + "content": "DF test set. The DF dataset also provides detailed labels for vocoder types and compression conditions, enabling more fine-grained analysis. To further evaluate performance, we compare the SOTA models Mamba, SLS, TCM, and AASIST with our proposed Nes2Net-X across these sub-tracks. The results are presented in Table VI. To improve readability and make the extensive numerical data easier to interpret, we also visualize the table's results in Fig. 4." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.73, + 0.923, + 0.927 + ], + "angle": 0, + "content": "For traditional vocoders, all models perform well, with most EERs below \\(2\\%\\). Notably, our proposed Nes2Net-X achieves exceptional results, consistently yielding EERs under \\(1\\%\\) across all conditions except C2. This demonstrates the strong stability of Nes2Net-X when handling unseen and relatively simple scenarios. In contrast, for neural autoregressive vocoders, all models experience a noticeable drop in performance, with EER reaching up to \\(5.96\\%\\). This indicates the greater challenge posed by the sequential and dynamic nature of autoregressive vocoders, which introduce higher variability in synthesis. 
Nevertheless, Nes2Net-X maintains a clear advantage over the competing models, demonstrating its robustness in handling these complex synthesis conditions." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.93, + 0.922, + 0.946 + ], + "angle": 0, + "content": "From the perspective of compression conditions, the differ" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "11" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.492, + 0.16 + ], + "angle": 0, + "content": "ences in model performance are less pronounced compared to the variations observed across vocoder types. Nes2Net-X consistently achieves the lowest EERs across all compression conditions, regardless of the level of distortion introduced by compression. This consistency highlights the model's strong generalization ability across different levels of compressions." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.16, + 0.492, + 0.265 + ], + "angle": 0, + "content": "Overall, these findings demonstrate that Nes2Net-X is not only highly effective across diverse vocoder types, but also maintains superior performance under varying compression conditions. This robustness underscores the model's capability to handle both compression diversity and complex synthesis challenges, making it a reliable solution for deepfake audio detection across a wide range of scenarios." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.278, + 0.36, + 0.292 + ], + "angle": 0, + "content": "D. The results on the In-the-Wild dataset" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.295, + 0.491, + 0.332 + ], + "angle": 0, + "content": "TABLE VII PERFORMANCE IN EER \\((\\%)\\) ON THE IN-THE-WILD [50] DATASET. OUR RESULT IS REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.338, + 0.491, + 0.524 + ], + "angle": 0, + "content": "
| Front-end | Year | Back-end | EER |
|---|---|---|---|
| wav2vec 2.0 | 2022 | RawNet & AASIST (reported by [35]) | 10.46 |
| | 2024 | SLIM [14] | - (12.5) |
| | 2024 | MoE [69] | 9.17 |
| | 2024 | Conformer [63] | 8.42 |
| | 2024 | TCM [33] | 7.79 |
| | 2024 | OCKD [70] | 7.68 |
| | 2024 | SLS [35] | 7.46 (8.87) |
| | 2024 | Pascu et al. [74] | - (7.2) |
| | 2025 | Mamba [17] | 6.71 |
| | 2025 | WaveSpec [72] | 6.58 |
| | 2025 | LSR+LSA [71] | 5.92 |
| | 2025 | LSR+LSA [71]※ | 5.54 |
| | - | Proposed Nes2Net | 5.80 (7.06) |
| | - | Proposed Nes2Net-X | 5.52 (6.60) |
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.117, + 0.524, + 0.321, + 0.535 + ], + "angle": 0, + "content": "\\(\\text{※}\\) with extra data augmentation [71]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.538, + 0.491, + 0.628 + ], + "angle": 0, + "content": "The In-the-Wild dataset [50] is a collection of deepfake videos sourced from the internet. Unlike controlled datasets, it captures the diverse and unpredictable nature of real-world scenarios. This diversity is essential for developing and evaluating deepfake detection models, as it challenges them to generalize effectively across a wide range of conditions." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.629, + 0.492, + 0.749 + ], + "angle": 0, + "content": "In addition, unlike many other datasets that rely on self-generated fake audio, this dataset is collected from publicly available video and audio files explicitly labeled as audio deepfakes [50]. To account for the potential presence of partial spoofing, we evaluate our proposed Nes2Net and Nes2Net-X using the entire duration of each test sample instead of restricting it to the first 4 seconds, as the latter approach risks missing partially spoofed segments." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.749, + 0.492, + 0.884 + ], + "angle": 0, + "content": "The testing results, alongside SOTA models, are reported in Table VII. We find that the overall performance trends are consistent with those seen on the ASVspoof 2021 DF dataset. However, EERs on the In-the-Wild dataset are generally higher than those on the DF dataset, reflecting greater complexity and variability in real-world scenarios. Notably, the proposed Nes2Net-X outperforms all SOTA models, achieving the lowest EER of \\(5.52\\%\\) and a mean EER of \\(6.60\\%\\) on this challenging dataset." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.896, + 0.363, + 0.912 + ], + "angle": 0, + "content": "E. The results on the ASVspoof 5 dataset" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.492, + 0.945 + ], + "angle": 0, + "content": "The ASVspoof 5 dataset represents the most recent edition in the ASVspoof series. Unlike earlier versions, it introduces" + }, + { + "type": "table_caption", + "bbox": [ + 0.511, + 0.062, + 0.916, + 0.143 + ], + "angle": 0, + "content": "TABLE VIII A COMPARISON BETWEEN THE PROPOSED NES2NET AND THE AASIST BASELINE SYSTEM ON THE ASVSPOOF 5 DATASET [49]. 'PARAMS.' AND 'MMACs' REFER TO THE NUMBER OF PARAMETERS AND THE NUMBER OF MILLION MULTIPLY-ACCUMULATE OPERATIONS, RESPECTIVELY. 'AVG.' INDICATES THE AVERAGE RELATIVE PERFORMANCE IMPROVEMENT ACROSS ALL THREE EVALUATION METRICS." + }, + { + "type": "table", + "bbox": [ + 0.511, + 0.147, + 0.918, + 0.235 + ], + "angle": 0, + "content": "
| Model (Back-end) | Params.↓ | MMACs↓ | CLLR↓ | minDCF↓ | EER↓ | Avg. |
|---|---|---|---|---|---|---|
| AASIST | 447k | 707.65 | 0.9587 | 0.1645 | 6.08 | Benchmark |
| Nes2Net | 511k | 58.11 | 0.7912 | 0.1568 | 6.13 | 7.1% |
| Nes2Net-X | 511k | 91.35 | 0.7344 | 0.1535 | 5.92 | 10.9% |
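As a reference for the metrics reported in Table VIII, the sketch below computes CLLR from system scores. It assumes the scores behave as natural-log likelihood ratios (which is how CLLR is defined) and is not the official ASVspoof 5 scoring toolkit; `np.logaddexp` is used for numerical stability.

```python
import numpy as np

def cllr(llr_bonafide, llr_spoof):
    # Cost of log-likelihood ratio: average log2 cost over bona fide
    # (target) and spoofed (non-target) trials, assuming natural-log LLRs.
    llr_bonafide = np.asarray(llr_bonafide, dtype=float)
    llr_spoof = np.asarray(llr_spoof, dtype=float)
    c_miss = np.mean(np.logaddexp(0.0, -llr_bonafide)) / np.log(2.0)
    c_fa = np.mean(np.logaddexp(0.0, llr_spoof)) / np.log(2.0)
    return 0.5 * (c_miss + c_fa)
```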
" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.253, + 0.922, + 0.494 + ], + "angle": 0, + "content": "adversarial attacks and is crowdsourced under various acoustic conditions [49]. As it is newly released, there are currently no existing systems available for a fair comparison. Therefore, we re-implement the AASIST system as a baseline and compare it with our proposed Nes2Net and Nes2Net-X model. Following the ASVspoof 5 challenge guidelines [49], we use WavLM [3] as the front-end. Based the evaluation protocol in [37], we assess performance using three metrics: Cost of Log-Likelihood Ratio (CLLR), minimum Detection Cost Function (minDCF), and EER, and present the results in Table VIII. We observe that the Nes2Net and Nes2Net-X backend models result in only a slight increase in the number of parameters compared to AASIST, while significantly reducing MMMs. Moreover, across all three evaluation metrics, the Nes2Net and Nes2Net-X back-ends improve performance by \\(7.1\\%\\) and \\(10.9\\%\\), receptively." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.518, + 0.794, + 0.535 + ], + "angle": 0, + "content": "F. The results on the PartialSpoof dataset" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.548, + 0.915, + 0.595 + ], + "angle": 0, + "content": "TABLE IX PERFORMANCE IN EER \\((\\%)\\) ON THE PARTIALSPOOF [51] DATASET. THE RESULTS ARE REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS. \\(\\dagger\\) INDICATES RESULTS OBTAINED FROM OUR IMPLEMENTATION." + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.6, + 0.913, + 0.728 + ], + "angle": 0, + "content": "
| Front-end | Year | Back-end | Dev | Eval |
|---|---|---|---|---|
| wav2vec 2.0 | 2024 | gMLP [51] | 0.35 | 0.64 |
| | - | gMLP† | 0.39 (0.43) | 0.72 (0.80) |
| | 2024 | 1D Res2Net [59] | 0.35 | 0.73 |
| | - | 1D Res2Net† | 0.35 (0.38) | 0.73 (0.79) |
| | - | SE ResNet† | 0.31 (0.50) | 0.77 (0.78) |
| | - | Nes2Net | 0.24 (0.36) | 0.53 (0.68) |
| | - | Nes2Net-X | 0.20 (0.33) | 0.57 (0.64) |
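Because the preceding discussion attributes the difficulty of partial-spoof detection to utterance-level pooling, the following sketch shows a common attentive statistics pooling layer in the spirit of [42] (the 'Att. Stat. Pool.' block in Fig. 1). The dimensions and bottleneck size are illustrative, not the exact configuration used in the paper.

```python
import torch
import torch.nn as nn

class AttentiveStatsPool(nn.Module):
    # Attentive statistics pooling [42]: attention weights over time,
    # then the weighted mean and standard deviation are concatenated.
    def __init__(self, channels, bottleneck=128):
        super().__init__()
        self.att = nn.Sequential(
            nn.Conv1d(channels, bottleneck, 1),
            nn.Tanh(),
            nn.Conv1d(bottleneck, channels, 1),
        )

    def forward(self, x):                      # x: (batch, channels, time)
        w = torch.softmax(self.att(x), dim=2)  # per-frame attention weights
        mean = (w * x).sum(dim=2)
        var = (w * x * x).sum(dim=2) - mean ** 2
        std = var.clamp(min=1e-8).sqrt()
        return torch.cat([mean, std], dim=1)   # (batch, 2 * channels)
```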
" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.748, + 0.921, + 0.839 + ], + "angle": 0, + "content": "Partially manipulating a sentence can significantly alter its intended meaning [59]. When such manipulations occur in small regions, existing models trained on fully spoofed speech and relying on pooling functions struggle to detect these subtle changes. Consequently, there is growing interest in the detection of partially spoofed speech [51], [59], [75]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.84, + 0.922, + 0.945 + ], + "angle": 0, + "content": "To evaluate the performance of our proposed model across different spoofing tasks, we conduct experiments on the PartialSpoof dataset [51]. The results are presented in Table IX. First, we reproduce the performance of two SOTA models, achieving results comparable to those reported in their original papers [51], [59]. Additionally, we evaluate SE ResNet, which demonstrated performance similar to the other baselines. In" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "12" + }, + { + "type": "table_caption", + "bbox": [ + 0.075, + 0.071, + 0.923, + 0.119 + ], + "angle": 0, + "content": "TABLE X THE PERFORMANCE IN EER \\((\\%)\\) ON THE ASVspoof 2021 LA, DF [48], AND IN-THE-WILD [50] DATASETS. THE RESULTS ARE REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS. W/ AUG.' AND W/O AUG.' INDICATE WHETHER EVALUATION WITH AUGMENTATIONS ON THE VALIDATION SET IS USED TO SELECT THE BEST CHECKPOINT FOR TESTING. CKPT Avg. REFERS TO THE NUMBER OF CHECKPOINTS AVERAGED." + }, + { + "type": "table", + "bbox": [ + 0.106, + 0.122, + 0.89, + 0.205 + ], + "angle": 0, + "content": "
| Back-end | Train Set | CKPT Avg. | w/ Aug.: 21LA [48] | w/ Aug.: 21DF [48] | w/ Aug.: In-the-Wild [50] | w/o Aug.: 21LA [48] | w/o Aug.: 21DF [48] | w/o Aug.: In-the-Wild [50] |
|---|---|---|---|---|---|---|---|---|
| Nes2Net-X | ASVspoof 19 [47] | N/A | 1.63 (1.79) | 1.84 (2.03) | 5.56 (6.61) | 1.73 (1.95) | 1.65 (1.91) | 5.73 (6.83) |
| | | 3 | 1.70 (1.80) | 1.88 (1.98) | 5.15 (6.31) | 1.66 (1.87) | 1.54 (1.98) | 5.59 (6.90) |
| | | 5 | 1.67 (1.78) | 1.80 (1.91) | 5.28 (6.31) | 1.88 (2.00) | 1.49 (1.78) | 5.52 (6.60) |
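The 'CKPT Avg.' rows in Table X average model weights across the top validation checkpoints, as described for the DF experiments. A minimal PyTorch sketch of uniform checkpoint averaging follows; it assumes each file stores a plain `state_dict` of tensors, and the file names in the usage line are illustrative.

```python
import torch

def average_checkpoints(paths):
    avg = None
    for p in paths:
        state = torch.load(p, map_location="cpu")
        if avg is None:
            avg = {k: v.clone().float() for k, v in state.items()}
        else:
            for k in avg:
                avg[k] += state[k].float()
    # Uniform average; non-float buffers (e.g., BatchNorm batch counters)
    # may need to be copied from a single checkpoint instead of averaged.
    return {k: v / len(paths) for k, v in avg.items()}

# model.load_state_dict(average_checkpoints(["ckpt_a.pt", "ckpt_b.pt", "ckpt_c.pt"]))
```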
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.221, + 0.492, + 0.251 + ], + "angle": 0, + "content": "contrast, our proposed Nes2Net and Nes2Net-X outperform all three baselines." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.274, + 0.383, + 0.289 + ], + "angle": 0, + "content": "G. Empirical Runtime and Memory Analysis" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.294, + 0.491, + 0.4 + ], + "angle": 0, + "content": "Number of parameters and MMACs are widely adopted metrics for evaluating model efficiency. These platform-independent measures offer consistent and fair comparisons across different hardware. However, to better reflect the real-world deployment costs of back-end architectures, we additionally benchmark their training time, inference time, and peak GPU memory usage, as summarized in Table XI." + }, + { + "type": "table_caption", + "bbox": [ + 0.078, + 0.418, + 0.49, + 0.476 + ], + "angle": 0, + "content": "TABLE XI TRAINING AND INFERENCE EFFICIENCY COMPARISON ACROSS BACK-END MODELS. THE TABLE REPORTS THE AVERAGE (AVG.) TRAINING AND INFERENCE TIME PER BATCH IN MILLSECONDS (MS/BATCH), AS WELL AS PEAK GPU MEMORY USAGE IN MEGABYTES (MB)." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.482, + 0.5, + 0.645 + ], + "angle": 0, + "content": "
| Back-end | Training (ms/batch)↓ | Inference (ms/batch)↓ | Peak GPU Memory (MB)↓ |
|---|---|---|---|
| AASIST Light (C=24) | 27.0 | 7.8 | 1,327 |
| AASIST Standard (C=32) | 53.8 | 18.7 | 3,454 |
| AASIST Large (C=40) | 79.2 | 28.1 | 4,273 |
| AASIST XL (C=48) | 86.1 | 30.7 | 5,087 |
| AASIST XXL (C=56) | 100.9 | 37.4 | 5,905 |
| ResNet | 7.8 | 2.6 | 691 |
| Res2Net | 15.6 | 3.5 | 721 |
| ECAPA-TDNN (C=128) | 9.4 | 3.1 | 698 |
| Proposed Nes2Net | 20.2 | 4.9 | 1,312 |
| Proposed Nes2Net-X | 29.1 | 9.2 | 2,231 |
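The timing protocol behind Table XI (10 warm-up batches discarded, 200 measured batches averaged, peak GPU memory recorded) can be approximated as follows. This sketch covers the inference measurement only; `model` and `batch` stand in for an actual back-end and its 400-frame, 1024-dimensional input batch, and training time would additionally include the forward, backward, and optimizer steps.

```python
import time
import torch

def benchmark_inference(model, batch, n_warmup=10, n_measure=200):
    model = model.cuda().eval()
    batch = batch.cuda()
    torch.cuda.reset_peak_memory_stats()
    with torch.no_grad():
        for _ in range(n_warmup):          # warm-up, excluded from timing
            model(batch)
        torch.cuda.synchronize()
        start = time.time()
        for _ in range(n_measure):
            model(batch)
        torch.cuda.synchronize()           # wait for all GPU kernels to finish
    ms_per_batch = (time.time() - start) * 1000.0 / n_measure
    peak_mb = torch.cuda.max_memory_allocated() / 2 ** 20
    return ms_per_batch, peak_mb
```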
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.658, + 0.491, + 0.763 + ], + "angle": 0, + "content": "All back-end models are evaluated under identical conditions: input features of 400 frames with 1024 dimensions, a batch size of 64, and execution on a dedicated NVIDIA H20 GPU. The first 10 batches are used for warm-up and excluded from the measurement, and the inference and training times are averaged over the subsequent 200 batches. Training time includes the forward, backward, and optimizer update steps." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.763, + 0.492, + 0.869 + ], + "angle": 0, + "content": "The results show that AASIST models exhibit rapidly increasing runtime and memory consumption as the channel dimension \\( C \\) grows. In contrast, our proposed Nes2Net achieves notably lower latency and memory usage. Nes2Net-X further improves performance in some settings by preserving more high-dimensional information, albeit at the cost of higher resource consumption." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.87, + 0.492, + 0.947 + ], + "angle": 0, + "content": "Conventional models such as ResNet, Res2Net, and ECAPA-TDNN offer faster runtime and smaller memory footprints than our proposed method, but fall short in detection accuracy as shown in earlier experiments. Therefore, when selecting a back-end architecture, we believe both Nes2Net" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.221, + 0.921, + 0.282 + ], + "angle": 0, + "content": "and Nes2Net-X offer flexible options: the former prioritizes efficiency, while the latter favors accuracy when computational resources permit. This underscores the importance of balancing performance and efficiency in real-world applications." + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.295, + 0.866, + 0.31 + ], + "angle": 0, + "content": "H. Should We Use Augmentation During Validation?" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.313, + 0.921, + 0.494 + ], + "angle": 0, + "content": "In all previous experiments, the datasets are split into three non-overlapping subsets: training, validation (or development), and test sets. The validation set is used to select the best-performing checkpoints for final evaluation on the test set. The training set typically applies data augmentation to enhance model performance and generalization. However, the use of augmentation during validation remains inconsistent across prior studies. For instance, wav2vec 2.0-AASIST [15] applies the same augmentation strategy to both training and validation sets. In contrast, WavLM-AASIST [16] does not use augmentation on the validation set, aligning with common practices in speaker verification research [34], [76], [77]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.495, + 0.92, + 0.584 + ], + "angle": 0, + "content": "In this section, we compare these two approaches and report the results in Table X. We observe that applying the same augmentation to the validation set as in the training set leads to worse performance on ASVspoof 2021 DF [48], but better results on In-the-Wild [50]. When no augmentation is applied to the validation set, the opposite trend is observed." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.584, + 0.92, + 0.704 + ], + "angle": 0, + "content": "From the outcome of the above study, we believe that in cases where robustness to certain variations (e.g., noise, compression, or distortions) is important, applying augmentation during validation provides insights into how well the model handles such conditions. As a result, the selected checkpoints from this approach may generalize better to these variations. Further investigation into this topic may yield deeper insights for future work." + }, + { + "type": "title", + "bbox": [ + 0.647, + 0.716, + 0.78, + 0.729 + ], + "angle": 0, + "content": "VI. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.734, + 0.921, + 0.9 + ], + "angle": 0, + "content": "In this work, we propose Nested Res2Net (Nes2Net) and its enhanced variant, Nes2Net-X, as lightweight and dimensionality reduction (DR) layer-free back-end architectures designed for speech anti-spoofing in the era of foundation models. Unlike conventional approaches that rely on a DR layer to bridge the mismatch between high-dimensional features and downstream classifiers, our proposed architectures directly process these rich representations. This not only eliminates the computational and parameter overhead introduced by DR layers but also avoids information loss, enhancing overall system efficiency and robustness." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.9, + 0.922, + 0.947 + ], + "angle": 0, + "content": "Nes2Net incorporates a novel nested multi-scale design that enables more effective feature extraction and deeper cross-channel interactions without increasing model complexity." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "13" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.492, + 0.116 + ], + "angle": 0, + "content": "The improved Nes2Net-X further strengthens representation learning by introducing learnable weighted feature fusion, offering adaptive control over the feature aggregation process." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.115, + 0.492, + 0.235 + ], + "angle": 0, + "content": "We conduct extensive evaluations across five representative datasets: CtrSVDD, ASVspoof 2021, ASVspoof 5, Partial-Spoof, and In-the-Wild, covering a wide range of singing voice deepfakes, fully spoofed speech, adversarial attacks, real-world deepfakes, and partially spoofed speech. Across all scenarios, our models achieve SOTA performance, demonstrating superior generalization, compactness, and resilience under unseen and challenging conditions." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.235, + 0.492, + 0.326 + ], + "angle": 0, + "content": "In summary, Nes2Net and Nes2Net-X offer a general-purpose, resource-efficient back-end for foundation model-based speech anti-spoofing, providing a practical yet powerful alternative to DR-dependent designs. To facilitate future research and applications, we make all source code and pretrained models publicly available." + }, + { + "type": "title", + "bbox": [ + 0.236, + 0.333, + 0.333, + 0.346 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.352, + 0.49, + 0.398 + ], + "angle": 0, + "content": "[1] A. Baevski, Y. Zhou, A. Mohamed, and M. 
Auli, \"wav2vec 2.0: A framework for self-supervised learning of speech representations,\" in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 33, 2020, pp. 12449-12460." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.399, + 0.492, + 0.444 + ], + "angle": 0, + "content": "[2] W.-N. Hsu, B. Bolte, Y.-H. H. Tsai, K. Lakhotia, R. Salakhutdinov, and A. Mohamed, \"HuBERT: Self-supervised speech representation learning by masked prediction of hidden units,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 29, pp. 3451-3460, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.444, + 0.492, + 0.5 + ], + "angle": 0, + "content": "[3] S. Chen, C. Wang, Z. Chen, Y. Wu, S. Liu, Z. Chen, J. Li, N. Kanda, T. Yoshioka, X. Xiao, J. Wu, L. Zhou, S. Ren, Y. Qian, Y. Qian, J. Wu, M. Zeng, X. Yu, and F. Wei, \"WavLM: Large-scale self-supervised pre-training for full stack speech processing,\" IEEE J. Sel. Top. Signal Process., vol. 16, no. 6, pp. 1505-1518, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.5, + 0.492, + 0.535 + ], + "angle": 0, + "content": "[4] A. T. Liu, S.-W. Li, and H.-y. Lee, “TERA: Self-supervised learning of transformer encoder representation for speech,” IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 29, pp. 2351-2366, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.535, + 0.492, + 0.569 + ], + "angle": 0, + "content": "[5] J. Zhao and W.-Q. Zhang, \"Improving automatic speech recognition performance for low-resource languages with self-supervised models,\" IEEE J. Sel. Top. Signal Process., vol. 16, no. 6, pp. 1227-1241, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.569, + 0.492, + 0.625 + ], + "angle": 0, + "content": "[6] J. weon Jung, W. Zhang, J. Shi, Z. Aldeneh, T. Higuchi, A. Gichamba, B.-J. Theobald, A. Hussen Abdelaziz, and S. Watanabe, \"ESPnet-SPK: full pipeline speaker embedding toolkit with reproducible recipes, self-supervised front-ends, and off-the-shelf models,\" in Proc. INTERSPEECH, 2024, pp. 4278-4282." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.625, + 0.492, + 0.648 + ], + "angle": 0, + "content": "[7] M. Li, Y. Ahmadiadli, and X.-P. Zhang, \"A survey on speech deepfake detection,\" ACM Comput. Surv., vol. 57, no. 7, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.648, + 0.492, + 0.692 + ], + "angle": 0, + "content": "[8] N. M. Müller, P. Kawa, W. H. Choong, E. Casanova, E. Gölle, T. Müller, P. Syga, P. Sperl, and K. Böttinger, \"MLAAD: The multi-language audio anti-spoofing dataset,\" in Proc. Int. Jt. Conf. Neural Netw. (IJCNN), 2024, pp. 1-7." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.692, + 0.492, + 0.738 + ], + "angle": 0, + "content": "[9] Y. Xie, Y. Lu, R. Fu, Z. Wen, Z. Wang, J. Tao, X. Qi, X. Wang, Y. Liu, H. Cheng, L. Ye, and Y. Sun, \"The codecfake dataset and countermeasures for the universally detection of deepfake audio,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 33, pp. 386-400, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.738, + 0.492, + 0.772 + ], + "angle": 0, + "content": "[10] R. K. Das, X. Tian, T. Kinnunen, and H. Li, “The attacker's perspective on automatic speaker verification: An overview,” in Proc. INTERSPEECH, 2020, pp. 4213–4217." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.772, + 0.492, + 0.817 + ], + "angle": 0, + "content": "[11] J.-w. Jung, Y. Wu, X. Wang, J.-H. Kim, S. Maiti, Y. Matsunaga, H.-j. Shim, J. Tian, N. Evans, J. S. Chung, W. Zhang, S. Um, S. Takamichi, and S. 
Watanabe, \"SpoofCeleb: Speech deepfake detection and SASV in the wild,\" IEEE Open J. Signal Process., vol. 6, pp. 68-77, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.817, + 0.492, + 0.852 + ], + "angle": 0, + "content": "[12] J. Du, X. Chen, H. Wu, L. Zhang, I. Lin, I. Chiu, W. Ren, Y. Tseng, Y. Tsao, J.-S. R. Jang et al., \"CodecFake-Omni: A large-scale codec-based deepfake speech dataset,\" arXiv preprint arXiv:2501.08238, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.852, + 0.492, + 0.874 + ], + "angle": 0, + "content": "[13] X. Chen, H. Wu, R. Jang, and H. yi Lee, \"Singing voice graph modeling for singfake detection,\" in Proc. INTERSPEECH, 2024, pp. 4843-4847." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.874, + 0.492, + 0.907 + ], + "angle": 0, + "content": "[14] Y. Zhu, S. Koppisetti, T. Tran, and G. Bharaj, \"SLIM: Style-linguistics mismatch model for generalized audio deepfake detection,\" in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 37, 2024, pp. 67901-67928." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.907, + 0.492, + 0.952 + ], + "angle": 0, + "content": "[15] H. Tak, M. Todisco, X. Wang, J. weon Jung, J. Yamagishi, and N. Evans, \"Automatic speaker verification spoofing and deepfake detection using wav2vec 2.0 and data augmentation,\" in Proc. Odyssey Speaker Lang. Recognit. Workshop, 2022, pp. 112-119." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.352, + 0.492, + 0.952 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.071, + 0.922, + 0.117 + ], + "angle": 0, + "content": "[16] A. Guragain, T. Liu, Z. Pan, H. B. Sailor, and Q. Wang, \"Speech foundation model ensembles for the controlled singing voice deepfake detection (CtrSVDD) challenge 2024,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.118, + 0.922, + 0.151 + ], + "angle": 0, + "content": "[17] Y. Xiao and R. K. Das, \"XLSR-Mamba: A dual-column bidirectional state space model for spoofing attack detection,\" IEEE Signal Process Lett., vol. 32, pp. 1276-1280, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.152, + 0.922, + 0.186 + ], + "angle": 0, + "content": "[18] S.-H. Gao, M.-M. Cheng, K. Zhao, X.-Y. Zhang, M.-H. Yang, and P. Torr, “Res2Net: A new multi-scale backbone architecture,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 43, no. 2, pp. 652-662, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.186, + 0.922, + 0.219 + ], + "angle": 0, + "content": "[19] K. He, X. Zhang, S. Ren, and J. Sun, \"Deep residual learning for image recognition,\" in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2016, pp. 770-778." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.219, + 0.922, + 0.254 + ], + "angle": 0, + "content": "[20] Y. Chen, S. Zheng, H. Wang, L. Cheng, Q. Chen, and J. Qi, \"An enhanced Res2Net with local and global feature fusion for speaker verification,\" in Proc. INTERSPEECH, 2023, pp. 2228-2232." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.254, + 0.922, + 0.298 + ], + "angle": 0, + "content": "[21] Y. Chen, S. Zheng, H. Wang, L. Cheng, Q. Chen, S. Zhang, and J. Li, \"ERes2NetV2: Boosting short-duration speaker verification performance with computational efficiency,\" in Proc. INTERSPEECH, 2024, pp. 3245-3249." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.299, + 0.922, + 0.333 + ], + "angle": 0, + "content": "[22] T. Liu, K. A. Lee, Q. Wang, and H. 
Li, \"Golden Gemini is all you need: Finding the sweet spots for speaker verification,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 2324-2337, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.333, + 0.922, + 0.367 + ], + "angle": 0, + "content": "[23] X. Li, X. Wu, H. Lu, X. Liu, and H. Meng, \"Channel-wise gated Res2Net: Towards robust detection of synthetic speech attacks,\" in Proc. INTERSPEECH, 2021, pp. 4314-4318." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.367, + 0.922, + 0.389 + ], + "angle": 0, + "content": "[24] J. Kim and S. M. Ban, \"Phase-aware spoof speech detection based on Res2Net with phase network,\" in Proc. ICASSP, 2023, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.389, + 0.922, + 0.435 + ], + "angle": 0, + "content": "[25] T. Liu, I. Kukanov, Z. Pan, Q. Wang, H. B. Sailor, and K. A. Lee, \"Towards quantifying and reducing language mismatch effects in cross-lingual speech anti-spoofing,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 1185-1192." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.435, + 0.922, + 0.479 + ], + "angle": 0, + "content": "[26] J.-w. Jung, H.-S. Heo, H. Tak, H.-j. Shim, J. S. Chung, B.-J. Lee, H.-J. Yu, and N. Evans, \"AASIST: Audio anti-spoofing using integrated spectro-temporal graph attention networks,\" in Proc. ICASSP, 2022, pp. 6367-6371." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.48, + 0.922, + 0.525 + ], + "angle": 0, + "content": "[27] Y. Chen, J. Yi, J. Xue, C. Wang, X. Zhang, S. Dong, S. Zeng, J. Tao, Z. Lv, and C. Fan, \"RawBMamba: End-to-end bidirectional state space model for audio deepfake detection,\" in Proc. INTERSPEECH, 2024, pp. 2720-2724." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.525, + 0.922, + 0.559 + ], + "angle": 0, + "content": "[28] Y. Chen, H. Wu, N. Jiang, X. Xia, Q. Gu, Y. Hao, P. Cai, Y. Guan, J. Wang, W. Xie et al., \"Ustc-kxdigit system description for asvsproof5 challenge,\" arXiv preprint arXiv:2409.01695, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.559, + 0.922, + 0.593 + ], + "angle": 0, + "content": "[29] Z. Wei, D. Ye, J. Deng, and Y. Lin, “From voices to beats: Enhancing music deepfake detection by identifying forgeries in background,” in Proc. ICASSP, 2025, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.593, + 0.922, + 0.627 + ], + "angle": 0, + "content": "[30] Y. Guan, Y. Ai, Z. Li, S. Peng, and W. Guo, \"Recursive feature learning from pre-trained models for spoofing speech detection,\" in Proc. ICASSP, 2025, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.627, + 0.922, + 0.661 + ], + "angle": 0, + "content": "[31] Z. Pan, T. Liu, H. B. Sailor, and Q. Wang, \"Attentive merging of hidden embeddings from pre-trained speech model for anti-spoofing detection,\" in Proc. INTERSPEECH, 2024, pp. 2090-2094." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.661, + 0.922, + 0.705 + ], + "angle": 0, + "content": "[32] M. Huaifah, T. Liu, H. B. Sailor, K. M. Tan, T. K. Vangani, Q. Wang, J. H. Wong, N. F. Chen, and A. T. Aw, \"Towards a speech foundation model for Singapore and beyond,\" arXiv preprint arXiv:2412.11538, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.706, + 0.922, + 0.75 + ], + "angle": 0, + "content": "[33] D.-T. Truong, R. Tao, T. Nguyen, H.-T. Luong, K. A. Lee, and E. S. Chng, “Temporal-channel modeling in multi-head self-attention for synthetic speech detection,” in Proc. INTERSPEECH, 2024, pp. 537–541." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.751, + 0.922, + 0.796 + ], + "angle": 0, + "content": "[34] B. Desplanques, J. Thienpondt, and K. Demuynck, \"ECAPA-TDNN: Emphasized channel attention, propagation and aggregation in TDNN based speaker verification,\" in Proc. INTERSPEECH, 2020, pp. 3830-3834." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.797, + 0.922, + 0.831 + ], + "angle": 0, + "content": "[35] Q. Zhang, S. Wen, and T. Hu, \"Audio deepfake detection with self-supervised XLS-R and SLS classifier,\" in Proc. ACM Int. Conf. Multimedia, 2024, pp. 6765-6773." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.831, + 0.922, + 0.865 + ], + "angle": 0, + "content": "[36] Z. Ge, X. Xu, H. Guo, Z. Yang, and B. Schuller, \"Gncl: A graph neural network with consistency loss for segment-level spoofed speech detection,\" in Proc. ICASSP, 2025, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.865, + 0.922, + 0.923 + ], + "angle": 0, + "content": "[37] X. Wang, H. Delgado, H. Tak, J. weon Jung, H. jin Shim, M. Todisco, I. Kukanov, X. Liu, M. Sahidullah, T. H. Kinnunen, N. Evans, K. A. Lee, and J. Yamagishi, \"ASVspoof 5: crowdsourced speech data, deepfakes, and adversarial attacks at scale,\" in Autom. Speaker Verif. Spoofing Countermeas. Workshop, 2024, pp. 1-8." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.071, + 0.922, + 0.923 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.078, + 0.03, + 0.454, + 0.042 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "14" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.071, + 0.492, + 0.106 + ], + "angle": 0, + "content": "[38] Y. Zhang, Y. Zang, J. Shi, R. Yamamoto, T. Toda, and Z. Duan, \"SVDD 2024: The inaugural singing voice deepfake detection challenge,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 782-787." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.107, + 0.492, + 0.151 + ], + "angle": 0, + "content": "[39] Q. Zhang, S. Wen, F. Yan, T. Hu, and J. Li, \"XWSB: A blend system utilizing XLS-R and WavLM with SLS classifier detection system for SVDD 2024 challenge,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 788-794." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.152, + 0.492, + 0.186 + ], + "angle": 0, + "content": "[40] J. Yi, J. Tao, R. Fu, X. Yan, C. Wang, T. Wang, C. Y. Zhang, X. Zhang, Y. Zhao, Y. Ren et al., \"ADD 2023: the second audio deepfake detection challenge,\" arXiv preprint arXiv:2305.13774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.186, + 0.492, + 0.208 + ], + "angle": 0, + "content": "[41] J. Hu, L. Shen, and G. Sun, \"Squeeze-and-excitation networks,\" in Proc. IEEE Conf. Comput. Vis. Pattern Recog. (CVPR), 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.209, + 0.492, + 0.242 + ], + "angle": 0, + "content": "[42] K. Okabe, T. Koshinaka, and K. Shinoda, \"Attentive statistics pooling for deep speaker embedding,\" in Proc. INTERSPEECH, 2018, pp. 2252-2256." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.243, + 0.492, + 0.276 + ], + "angle": 0, + "content": "[43] T. Zhou, Y. Zhao, and J. Wu, \"ResNeXt and Res2Net structures for speaker verification,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2021, pp. 301-307." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.277, + 0.492, + 0.31 + ], + "angle": 0, + "content": "[44] Q. Wang, B. Wu, P. Zhu, P. Li, W. Zuo, and Q. Hu, \"ECA-Net: Efficient channel attention for deep convolutional neural networks,\" in Proc. IEEE Conf. Comput. Vis. Pattern Recog. (CVPR), 2020, pp. 11531-11539." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.311, + 0.492, + 0.344 + ], + "angle": 0, + "content": "[45] T. Liu, R. K. Das, K. A. Lee, and H. Li, \"MFA: TDNN with multi-scale frequency-channel attention for text-independent speaker verification with short utterances,\" in Proc. ICASSP, 2022, pp. 7517-7521." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.345, + 0.492, + 0.389 + ], + "angle": 0, + "content": "[46] Y. Zang, J. Shi, Y. Zhang, R. Yamamoto, J. Han, Y. Tang, S. Xu, W. Zhao, J. Guo, T. Toda, and Z. Duan, \"CtrSVDD: A benchmark dataset and baseline analysis for controlled singing voice deepfake detection,\" in Proc. INTERSPEECH, 2024, pp. 4783-4787." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.39, + 0.492, + 0.502 + ], + "angle": 0, + "content": "[47] X. Wang, J. Yamagishi, M. Todisco, H. Delgado, A. Nautsch, N. Evans, M. Sahidullah, V. Vestman, T. Kinnunen, K. A. Lee, L. Juvela, P. Alku, Y.-H. Peng, H.-T. Hwang, Y. Tsao, H.-M. Wang, S. L. Maguer, M. Becker, F. Henderson, R. Clark, Y. Zhang, Q. Wang, Y. Jia, K. Onuma, K. Mushika, T. Kaneda, Y. Jiang, L.-J. Liu, Y.-C. Wu, W.-C. Huang, T. Toda, K. Tanaka, H. Kameoka, I. Steiner, D. Matrouf, J.-F. Bonastre, A. Govender, S. Ronanki, J.-X. Zhang, and Z.-H. Ling, \"ASVspoof 2019: A large-scale public database of synthesized, converted and replayed speech,\" Comput. Speech Lang., vol. 64, p. 101114, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.503, + 0.492, + 0.559 + ], + "angle": 0, + "content": "[48] J. Yamagishi, X. Wang, M. Todisco, M. Sahidullah, J. Patino, A. Nautsch, X. Liu, K. A. Lee, T. Kinnunen, N. Evans, and H. Delgado, \"ASVspoof 2021: accelerating progress in spoofed and deepfake speech detection,\" in Autom. Speaker Verif. Spoofing Countermeas. Challenge, 2021, pp. 47-54." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.56, + 0.492, + 0.616 + ], + "angle": 0, + "content": "[49] X. Wang, H. Delgado, H. Tak, J.-w. Jung, H.-j. Shim, M. Todisco, I. Kukanov, X. Liu, M. Sahidullah, T. Kinnunen et al., \"ASVspoof 5: Design, collection and validation of resources for spoofing, deepfake, and adversarial attack detection using crowdsourced speech,\" arXiv preprint arXiv:2502.08857, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.617, + 0.492, + 0.649 + ], + "angle": 0, + "content": "[50] N. M. Müller, P. Czempin, F. Dieckmann, A. Froghyar, and K. Böttinger, \"Does audio deepfake detection generalize?\" in Proc. INTERSPEECH, 2022, pp. 2783-2787." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.65, + 0.492, + 0.695 + ], + "angle": 0, + "content": "[51] L. Zhang, X. Wang, E. Cooper, N. Evans, and J. Yamagishi, \"The PartialProof database and countermeasures for the detection of short fake speech segments embedded in an utterance,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 31, pp. 813-825, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.696, + 0.492, + 0.718 + ], + "angle": 0, + "content": "[52] Y. Zang, Y. Zhang, M. Heydari, and Z. Duan, \"SingFake: Singing voice deepfake detection,\" in Proc. ICASSP, 2024, pp. 12156-12160." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.719, + 0.492, + 0.751 + ], + "angle": 0, + "content": "[53] Y. Xie, J. Zhou, X. Lu, Z. Jiang, Y. Yang, H. Cheng, and L. Ye, \"FSD: An initial chinese dataset for fake song detection,\" in Proc. ICASSP, 2024, pp. 4605-4609." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.752, + 0.492, + 0.786 + ], + "angle": 0, + "content": "[54] T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar, “Focal loss for dense object detection,” in IEEE Int. Conf. Comput. Vis. (ICCV), 2017, pp. 2980–2988." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.786, + 0.492, + 0.83 + ], + "angle": 0, + "content": "[55] H. Tak, M. Kamble, J. Patino, M. Todisco, and N. Evans, \"Rawboost: A raw data boosting and augmentation method applied to automatic speaker verification anti-spoofing,\" in Proc. ICASSP, 2022, pp. 6382-6386." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.071, + 0.492, + 0.83 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.071, + 0.921, + 0.095 + ], + "angle": 0, + "content": "[56] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” in Int. Conf. Learn. Represent., 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.096, + 0.921, + 0.117 + ], + "angle": 0, + "content": "[57] D. Snyder, G. Chen, and D. Povey, “Musan: A music, speech, and noise corpus,” arXiv preprint arXiv:1510.08484, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.118, + 0.921, + 0.162 + ], + "angle": 0, + "content": "[58] T. Ko, V. Peddinti, D. Povey, M. L. Seltzer, and S. Khudanpur, “A study on data augmentation of reverberant speech for robust speech recognition,” in 2017 IEEE international conference on acoustics, speech and signal processing (ICASSP). IEEE, 2017, pp. 5220–5224." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.162, + 0.921, + 0.195 + ], + "angle": 0, + "content": "[59] T. Liu, L. Zhang, R. K. Das, Y. Ma, R. Tao, and H. Li, \"How do neural spoofing countermeasures detect partially spoofed audio?\" in Proc. INTERSPEECH, 2024, pp. 1105-1109." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.196, + 0.921, + 0.228 + ], + "angle": 0, + "content": "[60] X. Wang and J. Yamagishi, “A comparative study on recent neural spoofing countermeasures for synthetic speech detection,” in Proc. INTERSPEECH, 2021, pp. 4259–4263." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.229, + 0.921, + 0.262 + ], + "angle": 0, + "content": "[61] J. M. Martin-Doñas and A. Álvarez, “The Vicomtech audio deepfake detection system based on wav2vec2 for the 2022 ADD challenge,” in Proc. ICASSP, 2022, pp. 9241–9245." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.263, + 0.921, + 0.296 + ], + "angle": 0, + "content": "[62] X. Wang and J. Yamagishi, “Investigating self-supervised front ends for speech spoofing countermeasures,” in Proc. Odyssey Speaker Lang. Recognit. Workshop, 2022, pp. 100–106." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.297, + 0.921, + 0.33 + ], + "angle": 0, + "content": "[63] E. Rosello, A. Gomez-Alanis, A. M. Gomez, and A. Peinado, “A conformer-based classifier for variable-length utterance processing in anti-spoofing,” in Proc. INTERSPEECH, 2023, pp. 5281-5285." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.331, + 0.921, + 0.376 + ], + "angle": 0, + "content": "[64] E. Rosello, A. M. Gomez, I. López-Espejo, A. M. Peinado, and J. M. 
Martín-Doñas, “Anti-spoofing ensembling model: Dynamic weight allocation in ensemble models for improved voice biometrics security,” in Proc. INTERSPEECH, 2024, pp. 497–501." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.377, + 0.921, + 0.41 + ], + "angle": 0, + "content": "[65] H. M. Tran, D. Guennec, P. Martin, A. Sini, D. Loline, A. Delhay, and P.-F. Marteau, \"Spoofed speech detection with a focus on speaker embedding,\" in Proc. INTERSPEECH, 2024, pp. 2080-2084." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.41, + 0.921, + 0.455 + ], + "angle": 0, + "content": "[66] B. Wang, Y. Tang, F. Wei, Z. Ba, and K. Ren, \"FTDKD: Frequency-time domain knowledge distillation for low-quality compressed audio deepfake detection,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 4905-4918, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.456, + 0.921, + 0.489 + ], + "angle": 0, + "content": "[67] Y. Zhang, J. Lu, Z. Shang, W. Wang, and P. Zhang, “Improving short utterance anti-spoofing with AASIST2,” in Proc. ICASSP, 2024, pp. 11636-11640." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.49, + 0.921, + 0.523 + ], + "angle": 0, + "content": "[68] Y. Guo, H. Huang, X. Chen, H. Zhao, and Y. Wang, \"Audio deepfake detection with self-supervised WavLm and multi-fusion attentive classifier,\" in Proc. ICASSP, 2024, pp. 12702-12706." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.524, + 0.921, + 0.557 + ], + "angle": 0, + "content": "[69] Z. Wang, R. Fu, Z. Wen, J. Tao, X. Wang, Y. Xie, X. Qi, S. Shi, Y. Lu, Y. Liu et al., \"Mixture of experts fusion for fake audio detection using frozen wav2vec 2.0,\" arXiv preprint arXiv:2409.11909, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.557, + 0.921, + 0.591 + ], + "angle": 0, + "content": "[70] J. Lu, Y. Zhang, W. Wang, Z. Shang, and P. Zhang, “One-class knowledge distillation for spoofing speech detection,” in Proc. ICASSP, 2024, pp. 11251-11255." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.592, + 0.921, + 0.625 + ], + "angle": 0, + "content": "[71] W. Huang, Y. Gu, Z. Wang, H. Zhu, and Y. Qian, \"Generalizable audio deepfake detection via latent space refinement and augmentation,\" in Proc. ICASSP, 2025, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.626, + 0.921, + 0.648 + ], + "angle": 0, + "content": "[72] Z. Jin, L. Lang, and B. Leng, \"Wave-spectrogram cross-modal aggregation for audio deepfake detection,\" in Proc. ICASSP, 2025, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.649, + 0.921, + 0.681 + ], + "angle": 0, + "content": "[73] C. Y. Kwok, D.-T. Truong, and J. Q. Yip, \"Robust audio deepfake detection using ensemble confidence calibration,\" in Proc. ICASSP, 2025, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.682, + 0.921, + 0.715 + ], + "angle": 0, + "content": "[74] O. Pascu, A. Stan, D. Oneata, E. Oneata, and H. Cucu, \"Towards generalisable and calibrated audio deepfake detection with self-supervised representations,\" in Proc. INTERSPEECH, 2024, pp. 4828-4832." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.716, + 0.921, + 0.75 + ], + "angle": 0, + "content": "[75] H.-T. Luong, H. Li, L. Zhang, K. A. Lee, and E. S. Chng, “LlamaPartial-Spoof: An LLM-driven fake speech dataset simulating disinformation generation,” arXiv preprint arXiv:2409.14743, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.75, + 0.921, + 0.783 + ], + "angle": 0, + "content": "[76] T. Liu, K. A. Lee, Q. 
Wang, and H. Li, \"Disentangling voice and content with self-supervision for speaker recognition,\" Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 36, pp. 50221-50236, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.784, + 0.921, + 0.829 + ], + "angle": 0, + "content": "[77] S. Wang, Z. Chen, K. A. Lee, Y. Qian, and H. Li, “Overview of speaker modeling and its applications: From the lens of deep speaker representation learning,” IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 4971–4998, 2024." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.071, + 0.921, + 0.829 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_origin.pdf b/data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f1f9002d16593bb07fd2ec80f0383739a578699a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c38ac6549dc60cab20aceb88fa67c5cc23a4dc7712c2f6dbe6f108e04e3a8fd2 +size 1717631 diff --git a/data/2025/2504_05xxx/2504.05657/full.md b/data/2025/2504_05xxx/2504.05657/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e4e9bc9ee24b9527ad4bca2a9487a15b7cf6e496 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/full.md @@ -0,0 +1,458 @@ +The current version is 'Preprint'. + +This work has been submitted to the IEEE for possible publication. Copyright may be transferred without notice, after which this version may no longer be accessible. + +This information aligns with the guidelines available at: + +https://journals.ieeethorcenter.ieee.org/become-an-iiie-journal-author/publishing-ethics/guidelines-and-policies/post-publication-policies/ + +# Nes2Net: A Lightweight Nested Architecture for Foundation Model Driven Speech Anti-spoofing + +Tianchi Liu, Student Member, Duc-Tuan Truong, Student Member, Rohan Kumar Das, Senior Member, Kong Aik Lee, Senior Member, Haizhou Li, Fellow + +Abstract—Speech foundation models have significantly advanced various speech-related tasks by providing exceptional representation capabilities. However, their high-dimensional output features often create a mismatch with downstream task models, which typically require lower-dimensional inputs. A common solution is to apply a dimensionality reduction (DR) layer, but this approach increases parameter overhead, computational costs, and risks losing valuable information. To address these issues, we propose Nested Res2Net (Nes2Net), a lightweight back-end architecture designed to directly process high-dimensional features without DR layers. The nested structure enhances multi-scale feature extraction, improves feature interaction, and preserves high-dimensional information. We first validate Nes2Net on CtrSVDD, a singing voice deepfake detection dataset, and report a $22\%$ performance improvement and an $87\%$ back-end computational cost reduction over the state-of-the-art baseline. Additionally, extensive testing across four diverse datasets: ASVspoof 2021, ASVspoof 5, PartialSpoof, and In-the-Wild, covering fully spoofed speech, adversarial attacks, partial spoofing, and real-world scenarios, consistently highlights Nes2Net's superior robustness and generalization capabilities. The code package and pre-trained models are available at https://github.com/Liu-Tianchi/Nes2Net. 
+ +Index Terms—DeepFake detection, speech anti-spoofing, Res2Net, Nes2Net, SSL, speech foundation model + +# I. INTRODUCTION + +SPEECH foundation models, such as wav2vec 2.0 [1], HuBERT [2], and WavLM [3], have revolutionized speech processing by leveraging large-scale pretraining to capture complex acoustic and linguistic patterns [4]. This has driven notable advances in automatic speech recognition (ASR) [5], speaker verification (SV) [6], and other speech applications. + +Beyond traditional tasks, speech foundation models also show great promise in addressing critical security concerns, particularly speech anti-spoofing (also referred to as deepfake detection) [7]. With the growing sophistication of spoofing techniques, such as voice conversion, ensuring the reliability + +Tianchi Liu and Haizhou Li are with the Department of Electrical and Computer Engineering, National University of Singapore, Singapore. Tianchi Liu is also with LIGHTSPEED, Singapore (email: tianchi.liu@u.nus.edu); + +Duc-Tuan Truong is with the Nanyang Technological University, Singapore (email: truongdu001@e.ntu.edu.sg); + +Rohan Kumar Das is with the Fortemedia Singapore, Singapore (email: ecerohan@gmail.com); + +Kong Aik Lee is with the Department of Electrical and Electronic Engineering and the Research Centre for Data Science & Artificial Intelligence, The Hong Kong Polytechnic University, Hong Kong (e-mail: kongaik.lee@polyu.edu.hk); + +Haizhou Li is also with the Shenzhen Research Institute of Big Data, School of Artificial Intelligence, School of Data Science, The Chinese University of Hong Kong, Shenzhen, China (email: haizhouli@cuhk.edu.cn). + +and security of speech-driven systems has become a pressing concern [8]–[12]. Leveraging the rich representations of these foundation models could significantly improve the robustness and generalization of anti-spoofing systems [13]–[15]. + +While speech foundation models offer exceptional representations, their high-dimensional feature outputs present significant challenges for downstream tasks. Downstream models used in tasks like speech anti-spoofing typically require lower-dimensional features [15]–[17]. To address this mismatch, a common approach is to introduce a dimensionality reduction (DR) layer, usually implemented as a fully connected (FC) layer for transforming high-dimensional features into lower-dimensional features. However, this conventional strategy presents notable drawbacks. Given that downstream classifiers are typically compact [15], [16], the DR layer alone often consumes a substantial portion of the parameters and computational resources within the entire back-end model. Moreover, directly projecting high-dimensional features in a one-shot manner through an FC layer leads to the loss of important information, reducing the effectiveness of speech foundation models. These issues highlight the need for a more efficient and effective solution to bridge the dimensionality gap and fully utilize speech foundation models in downstream tasks. + +To address these challenges, we propose Nested Res2Net (Nes2Net) to process high-dimensional features from speech foundation models, eliminating the need for a DR layer while preserving the richness of the original representations. By addressing key limitations of DR layers, such as excessive computational cost and information loss, Nes2Net offers a more efficient and effective solution. 
This design makes it particularly suitable for tasks requiring a balance of high performance and efficiency, such as speech anti-spoofing. The key contributions of this work can be summarized as follows: + +- Novel Architecture: We introduce Nes2Net, a new approach that effectively addresses the limitations of DR layers. Nes2Net retains the expressive power of high-dimensional features while reducing model complexity. +- Enhanced Performance, Efficiency, and Generalization: Our method demonstrates a $22\%$ performance gain and an $87\%$ reduction in computational costs compared to the state-of-the-art baselines on the CtrlSVDD dataset. Further experiments conducted on four additional datasets across various scenarios demonstrate strong generalization capability and consistently superior performance. +- Reproducibility: To facilitate further research and application, we make our scripts and pre-trained models publicly available. + +# II. RELATED WORK + +# A. Res2Net + +Res2Net [18] is a well-known architecture designed to extract multi-scale features. Unlike ResNet [19], Res2Net uses hierarchical residual connections within a single block, allowing it to capture patterns across varying receptive fields simultaneously [18]. This design offers proven advantages in speech-related tasks, such as SV [20]–[22] and anti-spoofing [23]–[25], where capturing subtle variations and complex acoustic patterns is important. As shown in Fig. 1, Res2Net (highlighted using a light red block) can also serve as a classifier within a speech foundation model-based anti-spoofing system. Its ability to extract multi-scale features has led to superior performance over conventional models and motivates the design of Nested Res2Net in this work. + +# B. Hand-crafted Feature-based Speech Anti-Spoofing Models + +Hand-crafted acoustic features (such as MFCC) are common choices for many earlier speech anti-spoofing systems. These systems have evolved to effectively detect speech deepfakes [26], [27]. For instance, the Channel-wise Gated Res2Net (CG-Res2Net) [23] introduces a gating mechanism within the Res2Net architecture, enabling dynamic selection of channel-wise features to enhance generalization to unseen attacks. A widely recognized model is AASIST [26], which employs spectro-temporal graph attention layers to capture both temporal and spectral artifacts, thereby achieving efficient and accurate detection. Given AASIST's SOTA performance and its wide adoption in recent anti-spoofing challenges [16], [28], we consider it as our main baseline for evaluation. + +# C. Speech Foundation Models + +Speech foundation models are often referred to as Self-Supervised Learning (SSL) models due to their typical pretraining on large amounts of unlabeled speech data using self-supervised learning techniques. Examples include wav2vec 2.0 [1], HuBERT [2], and WavLM [3]. Unlike hand-crafted acoustic features, which are limited in their ability to adapt to diverse and complex conditions, self-supervised learning (SSL) models learn rich and generalized speech representations that can be effectively adapted to various downstream applications. This allows them to achieve superior performance in speech-related tasks, including speech anti-spoofing. + +# D. Speech Foundation Model-based Anti-spoofing + +As discussed in the previous subsection, speech foundation models can capture more informative representations than handcrafted or raw acoustic features [3]. 
This makes them highly effective for speech anti-spoofing, as they generalize well across datasets and are more robust to unseen attacks [15]. As a result, many recent anti-spoofing systems increasingly adopt these models as front-ends, feeding their features to the back-end classifiers and consistently outperforming traditional models [16], [29], [30]. + +To connect these powerful front-end models to downstream classifiers, a feature aggregation layer is introduced, as shown + +TABLEI CONTRIBUTION OF THE DR LAYER ON THE NUMBER OF PARAMETERS AND COMPUTATIONAL COST IN BACK-END MODELS. MMACS STANDS FOR MILLION MULTIPLY-ACCUMULATE OPERATIONS. + +
| Back-end Model | Parameters: DR | Parameters: Total | Parameters: % | MMACs: DR | MMACs: Total | MMACs: % |
|---|---|---|---|---|---|---|
| ResNet [19] | 131k | 611k | 21% | 26.24 | 70.62 | 37% |
| Res2Net [18] | 131k | 452k | 29% | 26.24 | 64.93 | 40% |
| ECAPA [34] | 131k | 497k | 26% | 26.24 | 80.21 | 33% |
| AASIST [26] | 131k | 447k | 29% | 26.24 | 707.65 | 4% |
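The DR-layer figures in Table I follow directly from the layer's shape. Below is a back-of-the-envelope check assuming a 1024-to-128 fully connected layer applied frame-wise to a 200-frame input; the frame count is our assumption, chosen so that the result matches the 26.24 MMACs entry (bias additions are counted as MACs).

```python
N, D, T = 1024, 128, 200            # input dim, reduced dim, frames (assumed)
params = N * D + D                  # weights + bias = 131,200, i.e., ~131k
mmacs = params * T / 1e6            # ~26.24 MMACs for one 200-frame utterance
print(f"{params} params, {mmacs:.2f} MMACs")
```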
+ +in Fig. 1. This layer combines features from different SSL layers using methods such as a simple weighted sum or attention-based methods like Squeeze-and-Excitation Aggregation (SEA) [16] and Attentive Merging (AttM) [31]. + +Following the aggregation layer, the resulting features are passed to the back-end classifier, as shown in the green box of Fig. 1. Existing methods typically use a DR layer, which reduces the high-dimensional features of $N$ channels (commonly $N = 1024$ [1], [3], [32]) to a lower dimension $D$ (e.g., $D = 128$ [15], [16] or $D = 144$ [17], [33]) to match the classifier's input requirements. The classifier model then extracts features from the DR layer outputs and produces the final score. As illustrated in the red box of Fig. 1, commonly used classifier structures include traditional models such as ResNet [19], Res2Net [18], ECAPA-TDNN [34], and AASIST [26]. + +The strong performance of these systems stems from their ability to capture rich speech representations, enabling more accurate distinction between real and spoofed speech. As a result, these systems have achieved SOTA results [33], [35], [36], especially in recent challenges like ASVspoof 5 [28], [37], CtrSVDD [16], [38], [39], and ADD [40]. However, the use of a DR layer introduces challenges that limit the backend's ability to fully leverage the rich representations from speech foundation models. In this work, we aim to better unlock the potential of foundation models for speech antispoofing. These issues will be discussed in the next subsection. + +# E. Limitation of Dimensionality Reduction Layer + +Existing speech foundation model-based anti-spoofing systems excel in extracting rich, high-dimensional feature representations, which capture intricate patterns in speech. However, this high dimensionality poses a significant challenge for downstream tasks. Models in these tasks typically require lower-dimensional features [23], [26], [27], creating a mismatch between the output features of the foundation models and the requirements of downstream processing. + +A commonly used approach for dimensionality reduction is to employ a DR layer. However, this approach has several issues, including parameter overhead and potential information loss. As shown in Table I, our analysis of back-end models further emphasizes the inefficiency of this approach. We consider commonly used feature dimensions of $N = 1024$ from large models [1], [3], and a reduced dimension of $D = 128$ , widely adopted in SOTA back-end models [15], [16], [31]. + +Across various back-end models, the DR layer, despite being just a single layer, consistently accounts for a substantial + +![](images/60a1d06954731a1d8497f7ad39edfbb82ed60f30aa0d12cd56c49475a8a4119e.jpg) +Fig. 1. The block diagram of the speech foundation model-based speech anti-spoofing system, showcasing both the traditional back-end models and the proposed Nes2Net back-end. The traditional back-end models include a DR layer and a classifier, such as ResNet [19], Res2Net [18], ECAPA-TDNN [34], and AASIST [26]. In contrast, the proposed Nes2Net back-end model features a DR layer-free design. Additionally, an enhanced version of its nested layer, named Nes2Net-X, is introduced to further improve performance. Abbreviations used in the figure include: 'FC' (fully connected layer), 'Conv' (convolutional layer), 'WS' (weighted sum), 'SE' (squeeze-and-excitation module) [41], and 'Att. Stat. Pool.' (attentive statistics pooling) [42]. 
share of parameters and computational cost, underscoring its resource-intensive nature. For instance, the DR layer accounts for $21\%$ to $29\%$ of the parameters across ResNet, Res2Net, ECAPA, and AASIST. In terms of computational cost, the DR layer generally contributes at least one-third of the total MACs. AASIST is the only exception, where the DR layer accounts for just $4\%$ of the MACs, primarily because its overall MAC count is an order of magnitude higher than that of the other models.

This table highlights that a single DR layer significantly inflates the back-end model's size and resource demands. Furthermore, its direct projection design discards important high-dimensional features, limiting the overall potential of speech foundation models.
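The figures in Table I can be verified with a quick back-of-the-envelope calculation. The sketch below assumes the DR layer is a single $1024 \to 128$ linear projection (a 1×1 convolution) applied frame by frame over $T = 200$ frames, i.e., a 4-second crop at the roughly 50 Hz frame rate of wav2vec 2.0 and WavLM; the frame count is our assumption rather than a value stated in the table.

```python
# Back-of-the-envelope check of the DR-layer costs in Table I, assuming a
# single 1024 -> 128 linear projection (1x1 conv) over T = 200 frames.
N, D, T = 1024, 128, 200

params = N * D + D            # weight matrix plus biases
macs = params * T             # one multiply-accumulate per weight per frame

print(f"DR layer params: {params / 1e3:.1f}k")  # ~131.2k -> the 131k in Table I
print(f"DR layer MMACs:  {macs / 1e6:.2f}")     # ~26.24 MMACs
```

Removing this single layer therefore saves roughly 131k parameters and over 26 MMACs before any classifier computation begins.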
# III. METHODOLOGY

# A. Proposed Nested Res2Net (Nes2Net)

The design of Nes2Net is driven by two primary objectives: 1) effectively and efficiently utilizing the high-dimensional features from speech foundation models, and 2) enhancing multi-scale feature extraction to achieve robust generalization in speech anti-spoofing tasks. These objectives are realized through a novel nested architecture that simultaneously improves the efficiency, flexibility, and robustness of the model.

Efficiency and Retention of Rich Feature Information: The analysis in Section II-E reveals the limitations of employing the DR layer. Building upon these observations, Nes2Net entirely removes the DR layer, directly processing high-dimensional features to retain their intrinsic richness and minimize unnecessary computational costs. By bypassing the DR layer, Nes2Net prevents the information bottleneck typically caused by early dimensionality reduction. This ensures the preservation of detailed representations essential for accurately distinguishing genuine speech from spoofed audio.

Enhanced Multi-Scale Feature Interaction and Expressiveness: While the Res2Net architecture effectively extracts multi-scale features through hierarchical splits, it exhibits significant limitations when processing high-dimensional features directly, especially with large split scales $s$. Specifically, Res2Net suffers from feature dilution [18], redundant transformations [43], and restricted interactions among channels. Excessive splitting fragments the features, weakening their expressiveness, and repetitive transformations increase computational redundancy, potentially causing overfitting. Moreover, closely related information can be distributed across non-adjacent subsets, limiting effective cross-channel interactions.

To overcome these limitations, as illustrated in Fig. 1, we propose a novel Nested Res2Net (Nes2Net) architecture that introduces a hierarchical nesting structure. This additional degree of flexibility significantly enhances the model's representational capability. Each nested layer progressively refines features by building upon the outputs of preceding layers and also incorporates efficient local cross-channel attention mechanisms [44], [45], strengthening interactions across channels. This holistic feature extraction approach enables Nes2Net to comprehensively capture intricate speech patterns. Moreover, the cumulative refinement effectively mitigates the issue of feature dilution, preserving rich and expressive multi-scale information. Benefiting from the structural advantages of the nesting strategy, the need for excessive fine-grained splits is reduced, effectively mitigating redundant transformations. This approach also minimizes unnecessary computations, resulting in a compact yet highly expressive model.

Critically, overfitting is a well-known challenge in speech anti-spoofing tasks, often leading to degraded performance in cross-domain scenarios. Previous studies [23], [26], particularly with compact models like AASIST and Res2Net (both with fewer than 500k parameters), have shown that smaller models can help reduce overfitting. Our experiments with these models confirm that simply increasing their size does not always lead to better performance and can, in fact, make overfitting worse. As a result, improving feature quality through smarter model structure design becomes more important than simply scaling up the model. The nested architecture of Nes2Net provides clear benefits, as it maintains computational efficiency while reducing the risk of overfitting.

Nes2Net consists of an outer layer and several identical nested layers, described as follows:

1) Outer Layer: The outer layer of Nes2Net adopts a structure similar to that of Res2Net. The high-dimensional features produced by a speech foundation model are uniformly split into $s_1$ feature map subsets, denoted by $x_i$, where $i \in \{1, 2, \dots, s_1\}$. Each feature subset $x_i$ has the same spatial size but contains only $\frac{1}{s_1}$ of the channels of the input feature map. With the exception of $x_1$, each $x_i$ is paired with a corresponding nested layer, denoted by $\mathbf{K}_i(\cdot)$. The output of $\mathbf{K}_i(\cdot)$, represented as $y_i$, is computed as follows:

$$
y_{i} = \left\{ \begin{array}{ll} x_{i} & i = 1; \\ \mathbf{K}_{i}(x_{i}) & i = 2; \\ \mathbf{K}_{i}(x_{i} + y_{i-1}) & 2 < i \leq s_{1}, \end{array} \right. \tag{1}
$$

where $x_{i}$ is first added to the output of $\mathbf{K}_{i-1}(\cdot)$, and the resulting feature map is then fed into $\mathbf{K}_i(\cdot)$ for further processing. All $y_{i}$ features are concatenated along the channel dimension. Due to the combinatorial explosion effect [18], the output features encapsulate a fusion of receptive field characteristics across different scales and frame levels. These features are then pooled along the time axis to convert frame-level features into utterance-level representations, which are subsequently used to compute the final classification score.

It is worth noting that, since the outer layer directly processes high-dimensional features from the speech foundation model, the original two convolutional layers (kernel size of 1) used before splitting and after concatenation in Res2Net are removed to improve efficiency.

2) Nested Layer: The nested layer acts as the core module responsible for processing the outer layer's intermediate features, denoted by $x_{i}^{\prime}$, where $i \in \{2, \ldots, s_1\}$. Based on Eq. 1, $x_{i}^{\prime}$ is defined as:

$$
x_{i}^{\prime} = \left\{ \begin{array}{ll} x_{i} & i = 2; \\ x_{i} + y_{i-1} & 2 < i \leq s_{1}. \end{array} \right. \tag{2}
$$

Each nested layer $\mathbf{K}_i(\cdot)$ is designed to extract multi-scale representations from its input while maintaining computational efficiency. As shown in Fig. 1, the structure of $\mathbf{K}_i(\cdot)$ follows an SE-Res2Net-like design, but its input is the feature subset $x_i'$ from the outer layer of Nes2Net. Specifically, each nested layer consists of the following components:

Convolutional Layers: The input feature map is first processed by a convolutional layer with a kernel size of 1 to extract local features while preserving the spatial dimensions.

Multi-Scale Feature Extraction: To enable multi-scale processing, the input feature map $x_{i}^{\prime}$ is equally split into $s_2$ subsets along the channel dimension, denoted by $x_{i,j}^{\prime}$, where $j \in \{1, 2, \ldots, s_2\}$. Each subset undergoes separate transformations through convolutional operations $\mathbf{M}_j$ with varying receptive fields, yielding $y_{i,j}$, formulated as:

$$
y_{i,j} = \left\{ \begin{array}{ll} x_{i,j}^{\prime} & j = 1; \\ \mathbf{M}_{j}(x_{i,j}^{\prime} + y_{i,j-1}) & 1 < j \leq s_{2}. \end{array} \right. \tag{3}
$$

These transformed subsets are then concatenated to form the output $y_{i}$ of the nested layer.

SE Module: To further enhance the feature representations, a Squeeze-and-Excitation (SE) module is integrated into each nested layer. The SE module adaptively recalibrates channel-wise features to emphasize informative features and suppress less relevant ones [41].

Residual Connections: To enhance gradient flow and stabilize training, a residual connection is applied by adding the input $x_{i}^{\prime}$ to the output $y_{i}$. This design preserves the original information while incorporating newly learned features.

In summary, the nested layer is lightweight, highly efficient, and designed to improve robustness and generalization across diverse conditions.
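To make Eqs. (1)–(3) concrete, below is a minimal PyTorch sketch of the outer layer and one nested layer with $s_1 = s_2 = 8$ and $N = 1024$, the settings used in our experiments. The class names are ours, and details such as normalization, activations, the exact receptive fields of $\mathbf{M}_j$, and the placement of the SE module relative to the residual connection are simplifications; the released code should be treated as the reference implementation.

```python
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    """Squeeze-and-excitation: channel-wise recalibration [41]."""
    def __init__(self, channels, reduction=8):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction), nn.ReLU(),
            nn.Linear(channels // reduction, channels), nn.Sigmoid())

    def forward(self, x):                        # x: (B, C, T)
        w = self.fc(x.mean(dim=-1))              # squeeze over time
        return x * w.unsqueeze(-1)               # re-weight channels

class NestedLayer(nn.Module):
    """One nested layer K_i(.): an SE-Res2Net-style block over x_i' (Eq. 3)."""
    def __init__(self, channels, s2=8, kernel=3):
        super().__init__()
        assert channels % s2 == 0
        self.s2, width = s2, channels // s2
        self.pre = nn.Conv1d(channels, channels, kernel_size=1)
        self.convs = nn.ModuleList(              # M_j for j = 2..s2
            nn.Conv1d(width, width, kernel, padding=kernel // 2)
            for _ in range(s2 - 1))
        self.se = SEBlock(channels)

    def forward(self, x):                        # x: (B, C, T)
        xs = torch.chunk(self.pre(x), self.s2, dim=1)
        ys = [xs[0]]                             # y_{i,1} = x'_{i,1}
        for j in range(1, self.s2):              # Eq. (3)
            ys.append(self.convs[j - 1](xs[j] + ys[-1]))
        return self.se(torch.cat(ys, dim=1)) + x  # SE, then residual connection

class Nes2Net(nn.Module):
    """Outer layer (Eq. 1): split the N-dim features into s1 subsets and
    chain the nested layers K_i(.) with residual-style additions."""
    def __init__(self, in_dim=1024, s1=8, s2=8):
        super().__init__()
        assert in_dim % s1 == 0
        self.s1 = s1
        self.nested = nn.ModuleList(
            NestedLayer(in_dim // s1, s2=s2) for _ in range(s1 - 1))

    def forward(self, x):                        # x: (B, N, T) SSL features
        xs = torch.chunk(x, self.s1, dim=1)
        ys = [xs[0]]                             # y_1 = x_1
        for i in range(1, self.s1):              # Eq. (1): no addition at i = 2
            inp = xs[i] if i == 1 else xs[i] + ys[-1]
            ys.append(self.nested[i - 1](inp))
        return torch.cat(ys, dim=1)              # (B, N, T); pooled over T later

feats = torch.randn(2, 1024, 200)                # e.g., 4 s of WavLM features
print(Nes2Net()(feats).shape)                    # torch.Size([2, 1024, 200])
```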
# B. Enhanced Nested Res2Net (Nes2Net-X)

Nes2Net efficiently addresses the high-dimensional feature issue. However, it relies on an additive combination method within the nested layer, which may limit the flexibility and effectiveness of feature extraction, as it implicitly assigns equal importance to all features. To further enhance the representational capacity of Nes2Net, we propose an improved variant named Nes2Net-X. It replaces the original addition operation in the nested layer with a concatenation followed by a learnable weighted summation. This design explicitly preserves the individuality of the feature subsets before fusion and employs learnable weights to adaptively combine them. Nes2Net-X consists of the following components:

Feature Splitting and Processing: This component is the same as in the Nes2Net nested layer. The input feature $x_{i}^{\prime}$ is equally split into $s_2$ subsets along the channel dimension, denoted by $x_{i,j}^{\prime}$, where $j \in \{1, 2, \dots, s_2\}$. Each subset $x_{i,j}^{\prime}$ undergoes a convolutional operation to extract feature representations.

Feature Concatenation: The outputs of the convolutional layers are denoted as $z_{i,j}$. In Nes2Net-X, instead of summing the processed features as in Nes2Net, each current subset $x_{i,j}^{\prime}$ is concatenated with the previous output $z_{i,j-1}$ along a newly introduced dimension before being processed.

Weighted Sum: The additional dimension created during concatenation is merged back into the original feature space using a 'weighted sum' operation. This operation enables the model to dynamically assign importance to each subset, enhancing feature representation. For each subset, the 'weighted sum' is applied to the output feature $z_{i,j}$ of the convolutional layer. Let $w_{i,j}$ denote the learnable weights assigned to each concatenated feature.
The output $y_{i,j}$ of the 'weighted sum' is computed as: + +$$ +y _ {i, j} = \sum_ {k = 1} ^ {s} w _ {i, j, k} \cdot z _ {i, j, k} \tag {4} +$$ + +where $s$ denotes the number of subsets, $w_{i,j,k}$ represents the weight for the $k$ -th subset features $z_{i,j,k}$ . + +The weighted summation provides more flexible and effective feature integration, offering several advantages: + +- Enhanced Feature Diversity: By concatenating features across subsets, the network captures a richer set of features, encompassing various aspects of the input data. +- Learnable Feature Fusion: The introduction of learnable weights $w$ enables the model to prioritize more informative features, effectively suppressing less relevant ones. This adaptive mechanism allows the network to focus on the most discriminative features for the task. +- Improved Gradient Flow: By combining concatenation with weighted summation, the model facilitates better gradient propagation during training. This helps address potential issues such as vanishing or exploding gradients, leading to more stable and efficient learning. + +These modifications enable Nes2Net-X to retain the strengths of Nes2Net while introducing greater flexibility in feature fusion, ultimately improving performance. + +# IV. EXPERIMENTAL SETUPS + +# A. Datasets + +TABLE II AN SUMMARY OF THE DATASETS USED IN OUR EXPERIMENTS. + +
# IV. EXPERIMENTAL SETUPS

# A. Datasets

TABLE II
A SUMMARY OF THE DATASETS USED IN OUR EXPERIMENTS.

| Dataset | Spoofing Type | Train | Valid | Test |
| --- | --- | --- | --- | --- |
| CtrSVDD w/o ACEsinger bona fide [46] | Singing Voice | 84,404 | 43,625 | 64,734 |
| CtrSVDD w/ ACEsinger bona fide [46] | Singing Voice | 84,404 | 43,625 | 67,579 |
| ASVspoof 2019 [47] | Speech | 25,380 | 24,844 | - |
| ASVspoof 2021 LA [48] | Speech | - | - | 181,566 |
| ASVspoof 2021 DF [48] | Speech | - | - | 611,829 |
| ASVspoof 5 [49] | Speech | 182,357 | 140,950 | 680,774 |
| In-the-Wild [50] | Speech | - | - | 31,779 |
| PartialSpoof [51] | Partial Spoof | 25,380 | 24,844 | 71,237 |
We use five datasets across various scenarios, including singing voice deepfake, fully spoofed speech, adversarial attacks, and partially spoofed speech, to evaluate the performance of the proposed model. Singing voice deepfake detection (SVDD) is a growing area of interest in the research community [46], [52], [53]. The CtrSVDD dataset [46], [52] offers structured attack types and official evaluation protocols, making it suitable for systematic architecture exploration. As a newly collected resource, it captures recent spoofing techniques, providing a more challenging and relevant benchmark for modern anti-spoofing systems. We therefore adopt it as a representative example. Moreover, fully spoofed speech is the most studied category. In this work, we include two categories of datasets: (1) the ASVspoof series, which comprises ASVspoof 2019 [47], ASVspoof 2021 Logical Access (LA), ASVspoof 2021 Deepfake (DF) [48], and ASVspoof 5 [49]; and (2) the In-the-Wild dataset [50], which reflects real-world usage scenarios. Partially spoofed speech alters only part of an utterance to convey deceptive meaning. This emerging challenge has attracted growing attention. We use the PartialSpoof [51] dataset as a representative benchmark. Table II summarizes the datasets used in this study. Models are trained on the training set and validated on the validation set to select the best checkpoint for testing.

For CtrSVDD [46], we report results on two official test protocols, according to whether ACESinger bona fide samples are included. The 'A14' attack type of the CtrSVDD dataset is excluded following the official guidelines [46]. ASVspoof 2019 [47] is used only for training and validation, while the In-the-Wild [50], ASVspoof 2021 LA and DF [48] datasets are used only for testing. For the recently released ASVspoof 5 dataset [49], we use its train, development, and evaluation partitions for model training, validation, and testing, respectively. For PartialSpoof [51], we follow the standard partitioning into train, development, and evaluation sets.

# B. Training Strategies

Each experiment is run three times using different random seeds. We report both the result from the best-performing run and the average performance across all runs. The values of $s_1$ and $s_2$ are both set to 8 for Nes2Net and Nes2Net-X. The baseline systems for each dataset are built using SOTA models, and our proposed model adopts similar training strategies. The details are as follows:

![](images/efe1584a963bd86fedb0e112a862eb4e48cd69fc72a15bbe69361f939014ea25.jpg)
Fig. 2. The cyclic learning rate schedule using cosine annealing.

CtrSVDD: For the models trained on the CtrSVDD dataset [46], [52], we follow the baseline system from $[16]^1$. Following the setting in [16], we use a random seed of 42 to ensure reproducibility. Furthermore, due to the inherent stochasticity in deep learning, repeated runs are necessary to obtain reliable average results. We use the AdamW optimizer with batch size 34, an initial learning rate of $1 \times 10^{-6}$, and a weight decay of $1 \times 10^{-4}$. The learning rate is scheduled using cosine annealing, cycling down to a minimum of $1 \times 10^{-9}$.

As shown in Fig. 2, over 75 training epochs, we select checkpoints from the epoch with the minimum learning rate, as well as its preceding and following epochs, for validation. The best validation result is then used for testing.
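The schedule and checkpoint-selection rule described above can be sketched with PyTorch's built-in cosine annealing with warm restarts. The optimizer, learning rates, weight decay, and the 75-epoch budget follow the text; the cycle length `T_0 = 25` is purely illustrative, as it is not stated here.

```python
import torch

model = torch.nn.Linear(10, 2)                   # stand-in for the full model
opt = torch.optim.AdamW(model.parameters(), lr=1e-6, weight_decay=1e-4)
sched = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
    opt, T_0=25, eta_min=1e-9)                   # cycle length is illustrative

lrs = []
for epoch in range(75):
    # ... one training epoch runs here, then a checkpoint is saved ...
    lrs.append(opt.param_groups[0]["lr"])
    sched.step()

# Checkpoint selection: take the epoch with the minimum learning rate plus its
# neighbors, validate all three, and keep the best one for testing.
e = min(range(len(lrs)), key=lambda i: lrs[i])
print([e - 1, e, e + 1])
```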
We use binary focal loss [54], a generalization of binary cross-entropy loss, with a focusing parameter $\gamma$ of 2 and a positive class weight $\alpha$ of 0.25. To standardize input length, each sample is randomly cropped or padded to 4 seconds during training. We adopt the Rawboost 'parallel: $(1)+(2)$' data augmentation strategy [55], as explored in [16]. WavLM is used as the front-end model for this dataset. The pre-trained model and implementation of WavLM are obtained from S3PRL$^2$.

ASVspoof 2019 & 2021: For the models trained on the ASVspoof 2019 [47] dataset, we follow the baseline system proposed in $[15]^3$. Audio data are cropped or concatenated to create segments of approximately 4 seconds in duration (64,600 samples) for both training and testing.

$^{1}$ https://github.com/Anmol2059/SVDD2024
$^{2}$ https://github.com/s3prl/s3prl
$^{3}$ https://github.com/TakHemlata/SSL_Anti-spoofing

TABLE III
PERFORMANCE IN EER (%) ON THE CTRSVDD EVALUATION SET [46] WITH WAVLM [3] FRONT-END. RESULTS ARE SHOWN AS 'BEST (MEAN)' OVER 3 RUNS. PARAMS. AND MMACS REFER TO THE NUMBER OF PARAMETERS AND MILLION MULTIPLY-ACCUMULATE OPERATIONS, RESPECTIVELY. W/O AND W/ ACE B.F. REFER TO 'WITHOUT' AND 'WITH' ACESINGER BONA FIDE SAMPLES, RESPECTIVELY. ATTACK-SPECIFIC EERS ARE COMPUTED UNDER THE 'W/O ACE B.F.' CONDITION. BEST RESULTS ARE IN BOLD; SECOND-BEST ARE UNDERLINED. † DENOTES IMPLEMENTATION CONDUCTED BY US.
| Back-end | Params. | MMACs | A9 | A10 | A11 | A12 | A13 | Pooled (w/o ACE. B.F.) | Pooled (w/ ACE. B.F.) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| XWSB [39] ※ | - | - | - | - | - | - | - | - | 2.32 |
| SLS [39] | - | - | - | - | - | - | - | - | 2.59 |
| AASIST (C=32) [16] | 447k | 707.65 | - | - | - | - | - | - | 2.70 |
| AASIST Light (C=24) † | 159k | 91.35 | 1.27 (1.37) | 0.87 (1.00) | 5.44 (5.86) | 4.84 (5.65) | 0.98 (1.05) | 3.95 (4.35) | 3.41 (3.77) |
| AASIST Standard (C=32) † | 447k | 707.65 | 1.18 (1.28) | 0.73 (0.86) | 3.63 (3.86) | 5.65 (5.77) | 0.88 (1.00) | 3.30 (3.36) | 2.79 (2.89) |
| AASIST Large (C=40) † | 662k | 1,091.28 | 1.32 (1.37) | 0.87 (0.97) | 3.70 (3.96) | 5.04 (5.63) | 0.96 (1.06) | 3.19 (3.36) | 2.71 (2.94) |
| AASIST XL (C=48) † | 835k | 1,555.56 | 1.23 (1.36) | 0.76 (0.92) | 3.40 (4.64) | 4.93 (5.55) | 0.89 (1.06) | 3.12 (3.62) | 2.76 (3.18) |
| AASIST XXL (C=56) † | 1,087k | 2,104.57 | 0.96 (1.20) | 0.66 (0.84) | 3.86 (4.15) | 4.83 (5.43) | 0.75 (0.95) | 3.05 (3.43) | 2.65 (2.95) |
| ResNet † | 611k | 70.62 | 1.18 (1.21) | 0.80 (0.93) | 3.97 (5.06) | 4.60 (4.86) | 0.96 (1.03) | 3.11 (3.61) | 2.74 (3.17) |
| Res2Net † | 452k | 64.93 | 1.26 (1.37) | 0.83 (0.86) | 3.59 (4.08) | 4.45 (4.80) | 1.08 (1.09) | 3.02 (3.24) | 2.61 (2.78) |
| ECAPA-TDNN (C=128) † | 497k | 80.21 | 1.18 (1.39) | 0.67 (0.85) | 4.47 (5.84) | 4.63 (4.96) | 0.87 (1.04) | 3.19 (3.74) | 2.79 (3.30) |
| Proposed Nes2Net | 511k | 58.11 | 1.23 (1.34) | 0.76 (0.81) | 2.40 (2.43) | 5.00 (5.24) | 0.96 (0.99) | 2.53 (2.55) | 2.22 (2.27) |
| Proposed Nes2Net-X | 511k | 91.35 | 1.21 (1.23) | 0.63 (0.76) | 2.09 (2.32) | 4.99 (5.24) | 0.83 (0.92) | 2.48 (2.51) | 2.20 (2.24) |
※ XWSB is an ensemble-like model that combines two SSL front-ends [39], while all other models in Table III are based on a single SSL front-end.

We use the Adam optimizer [56] with a weight decay of $1 \times 10^{-4}$. To reproduce the AASIST baseline [15], we reduce the original batch size from 14 to 8 due to GPU memory constraints, and halve the learning rate from $1 \times 10^{-6}$ to $5 \times 10^{-7}$. For Nes2Net, benefiting from its lower GPU memory consumption, we use a batch size of 12 with a learning rate of $2.5 \times 10^{-7}$. The loss function used is weighted cross-entropy. Following [15], we apply Rawboost augmentations [55], specifically 'series: $(1+2+3)$' (Algo4) and 'series: $(1+2)$' (Algo5), for the AASIST baselines. For the proposed Nes2Net-X, only the former augmentation is applied. All models are trained for 100 epochs, and the best checkpoint on the validation set is used for testing on the ASVspoof 2021 [48] and In-the-Wild [50] datasets.

ASVspoof 5: Both our AASIST baseline and the proposed Nes2Net-X models are trained using settings similar to those used for AASIST on the ASVspoof 2019 corpus. However, several differences apply. The final learning rate is set to $1 \times 10^{-7}$, we apply data augmentation using MUSAN [57] and RIR [58], and training is stopped if there is no improvement on the development set for 5 consecutive epochs.

PartialSpoof: For models trained on PartialSpoof [51], we follow the baseline systems described in [51], $[59]^4$. Specifically, we use wav2vec 2.0 as the front-end, the MSE for P2SGrad [60] as the loss function, and Adam [56] as the optimizer. Following [59], the batch size is set to 2, and a learning rate of $2.5 \times 10^{-6}$ is adopted for the baseline systems. For the proposed Nes2Net and Nes2Net-X, the learning rate is set to $1 \times 10^{-5}$. The pooling layer used for the proposed Nes2Net and Nes2Net-X is attentive statistics pooling [42], and the reduction ratio of the SE module is set to 8. Training is terminated if no improvement is observed on the development set for 20 consecutive epochs. The epoch yielding the best performance on the development set is used for testing.

# V. RESULTS AND ANALYSIS

All Equal Error Rate (EER) results in this work are reported as 'best (mean)' over multiple runs. For cited results that (1) are based on a single run, (2) report only the best result, or (3) lack sufficient details, only a single value is presented.
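For reference, the EER used throughout can be computed with a standard threshold sweep, sketched below with NumPy on toy scores. This is an illustration of the metric, not the official evaluation code of any of the benchmarks.

```python
import numpy as np

def compute_eer(bonafide_scores, spoof_scores):
    """EER via a threshold sweep: the operating point where the false
    rejection rate (FRR) and false acceptance rate (FAR) are equal."""
    scores = np.concatenate([bonafide_scores, spoof_scores])
    labels = np.concatenate([np.ones(len(bonafide_scores)),
                             np.zeros(len(spoof_scores))])
    order = np.argsort(scores)                   # sweep thresholds low -> high
    labels = labels[order]
    frr = np.cumsum(labels) / labels.sum()       # bona fide rejected so far
    far = 1 - np.cumsum(1 - labels) / (1 - labels).sum()  # spoof still accepted
    idx = np.argmin(np.abs(frr - far))           # closest FRR/FAR crossing
    return (frr[idx] + far[idx]) / 2

rng = np.random.default_rng(0)
bona = rng.normal(2.0, 1.0, 1000)                # toy score distributions
spoof = rng.normal(-2.0, 1.0, 1000)
print(f"EER = {100 * compute_eer(bona, spoof):.2f}%")
```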
# A. Studies on the CtrSVDD dataset

We conduct experiments on the CtrSVDD dataset [46], following two testing protocols: one including ACESinger bona fide samples and the other excluding them [38]. While results for both protocols are reported in Table III, our primary analysis focuses on the 'without ACESinger bona fide (w/o ACE. B.F.)' scenario, as recommended by the dataset creators. Since AASIST (C=32) in our prior work [16], as well as SLS and XWSB [39], were evaluated during the CtrSVDD Challenge 2024, portions of their test sets differ from the current official protocol. As a result, the EERs by attack type are not directly comparable. To ensure a fair comparison, we re-implemented the AASIST (C=32) system under the official protocol and used it as our baseline, referred to as AASIST Standard (C=32) in Table III, achieving an EER of $2.79\%$, which is close to the originally reported $2.70\%$ [16]. Under the 'w/o ACE B.F.' condition, the best run achieves an EER of $3.30\%$ with an average of $3.36\%$ across three runs. Further experiments show that scaling up the AASIST model does not improve the mean EER, possibly due to parameter redundancy.

We additionally evaluate several widely used baseline systems, including ResNet [19], Res2Net [18], and ECAPA-TDNN [34]. ECAPA-TDNN and ResNet achieve EERs of $3.74\%$ and $3.61\%$, respectively, which are slightly worse than that of AASIST. In contrast, Res2Net benefits from the advantages of multi-scale feature extraction, delivering the best average performance among the baseline systems with an EER of $3.24\%$. Our proposed Nes2Net outperforms all baseline systems, achieving a mean EER of $2.55\%$ with the lowest computational cost. Furthermore, the enhanced version, Nes2Net-X, further improves the performance to $2.51\%$ EER, marking the best single-model performance reported to date. Compared to Res2Net, ResNet, ECAPA-TDNN, and the SOTA AASIST (C=32), Nes2Net-X achieves EER reductions of $23\%$, $30\%$, $33\%$, and $25\%$, respectively.

TABLE IV
PERFORMANCE IN EER (%) ON THE CTRSVDD EVALUATION SET [46], COMPARING THE PROPOSED NES2NET WITH RES2NET AND ITS VARIOUS VARIANTS. THE RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS, E.G., 3.02 (3.24) IN THE FIRST ROW, OR AS THE RESULT OF A SINGLE EXPERIMENT, E.G., 3.21 IN THE SECOND ROW. 'B' AND 'S' REPRESENT THE NUMBER OF BLOCKS AND THE SCALE OF RES2NET, RESPECTIVELY.
| Back-end | DR Layer | Reduced Dim. $D$ | Params. | MMACs | Pooled EER (w/o ACE. B.F.) | Pooled EER (w/ ACE. B.F.) | Remarks |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Res2Net (b=4, s=4) | ✓ | 128 | 452k | 64.93 | 3.02 (3.24) | 2.61 (2.78) | |
| Res2Net (b=4, s=16) | ✓ | 128 | 427k | 59.95 | 3.21 | 2.80 | increase scale s |
| Res2Net (b=4, s=64) | ✓ | 128 | 419k | 58.28 | 3.15 | 2.74 | |
| Res2Net (b=4, s=128) | ✓ | 128 | 417k | 57.98 | 3.26 | 2.88 | |
| Res2Net (b=4, s=4) | ✓ | 64 | 180k | 23.25 | 4.32 | 3.76 | change D |
| Res2Net (b=4, s=4) | ✓ | 256 | 1,273k | 202.91 | 3.83 | 3.38 | |
| Res2Net-woDR (b=1, s=4) | × | - | 861k | 119.15 | 4.15 | 3.62 | remove dimensionality reduction layer and increase scale s |
| Res2Net-woDR (b=1, s=8) | × | - | 615k | 70.12 | 4.23 | 3.71 | |
| Res2Net-woDR (b=1, s=16) | × | - | 456k | 38.24 | 3.82 | 3.35 | |
| Res2Net-woDR (b=1, s=32) | × | - | 367k | 20.45 | 2.98 (3.45) | 2.56 (3.02) | |
| Res2Net-woDR (b=1, s=64) | × | - | 320k | 11.10 | 2.73 (2.97) | 2.42 (2.61) | |
| Res2Net-woDR (b=1, s=128) | × | - | 296k | 6.31 | 3.29 | 2.88 | |
| Res2Net-woDR (b=1, s=256) | × | - | 284k | 3.88 | 3.57 | 3.13 | |
| Res2Net-woDR (b=2, s=64) | × | - | 637k | 21.78 | 3.20 | 2.82 | increase depth |
| Res2Net-woDR (b=4, s=64) | × | - | 1,270k | 43.15 | 3.09 (3.18) | 2.73 (2.83) | |
| Proposed Nes2Net | × | - | 511k | 58.11 | 2.53 (2.55) | 2.22 (2.27) | proposed nested design |
| Proposed Nes2Net-X | × | - | 511k | 91.35 | 2.48 (2.51) | 2.20 (2.24) | |
We also analyze performance across different synthetic attack types using the 'w/o ACE B.F.' protocol. Except for the 'A12' attack type [46], our model consistently achieves either the best or second-best performance, demonstrating strong generalization and robustness. Notably, the 'A12' attack type, based on Singing Voice Synthesis (SVS), proves particularly challenging, showing higher EERs across all models and highlighting a potential area for future improvement.

We observe that the performance trends are consistent across both conditions, with and without ACESinger bona fide samples. Moreover, the EER is lower when ACESinger bona fide samples are included. This indicates that, even though ACESinger bona fide samples are considered out-of-domain, the trained models exhibit strong generalization capabilities and are able to classify these samples accurately.

# B. The Roadmap of the Nes2Net

In this section, we introduce the roadmap from Res2Net to the proposed Nes2Net, with detailed results summarized in Table IV. All systems are implemented and evaluated under a unified framework for fair comparison. To aid interpretation, we visualize the number of parameters, MACs, and EER. These are represented in Fig. 3 by circle size, the horizontal axis, and the vertical axis, respectively. In the following, we provide detailed analyses:

Investigating Res2Net: Among the baselines in Table III, the Res2Net-based back-end outperforms ResNet, AASIST, and ECAPA-TDNN on the CtrSVDD dataset. Therefore, we select it as the reference baseline for further investigation. First, we experiment with adjusting the scale $s$ of Res2Net. We observe that as $s$ increases, the number of split groups increases linearly; however, the performance shows no significant improvement (depicted as the teal blue line in Fig. 3). This may be because adding too many split groups dilutes the feature representation, leading to redundancy.

![](images/b205df6253934c495cd6bf54f649c643eae158455b8a5a57b5838ccd4e16a70f.jpg)
Fig. 3. Visualization of Tables III and IV, highlighting our exploration of Res2Net and the roadmap of architectural changes leading to Nes2Net.

Next, we explore varying the dimensionality of the output features from the DR layer (referred to as Reduced Dimension $D$, depicted as the steel gray line in Fig. 3). Reducing $D$ to 64 significantly lowers the model size and MACs compared to the default $D = 128$, but leads to substantial performance degradation, increasing the EER from $3.02\%$ to $4.32\%$. Conversely, increasing $D$ to 256 results in a much larger model size and MACs but still leads to worse performance than $D = 128$. This may be because a larger $D$ introduces over-parameterization and noise, and may explain why $D = 128$ is commonly adopted in SOTA models [15], [16].

Removal of DR Layer: Foundation models often incorporate a DR layer in their back-end architecture to compress high-dimensional features into lower-dimensional representations, facilitating downstream tasks. For instance, models like wav2vec 2.0-AASIST [15] utilize such a layer alongside task-specific classifiers (e.g., AASIST, ResNet). However, as discussed in Section II-E, this projection layer consumes a substantial portion of the back-end model's parameters and MACs while potentially causing information loss.

To explore whether bypassing this layer preserves more task-relevant information, we propose a new back-end model: Res2Net without Dimensionality Reduction (Res2Net-woDR). By directly processing high-dimensional features, Res2Net-woDR simplifies the architecture and focuses on the raw features extracted by the speech foundation model. The naming emphasizes the absence of a DR layer, differentiating it from traditional approaches.
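As a rough illustration of this trade-off, the sketch below contrasts the parameter cost of the DR projection being removed with that of one flat Res2Net split level applied directly to the 1024-dimensional features (b=1, s=64). Note that the 320k total reported for this configuration in Table IV also includes the SE modules, pooling, and scoring head, which are omitted here.

```python
import torch.nn as nn

def n_params(m):
    return sum(p.numel() for p in m.parameters())

N, D, s = 1024, 128, 64
dr_layer = nn.Conv1d(N, D, kernel_size=1)        # the projection being removed
print(f"DR layer alone: {n_params(dr_layer) / 1e3:.0f}k")        # ~131k

# One flat Res2Net split level over the full 1024 channels (b=1, s=64):
# s-1 small convolutions over 1024/s = 16 channels each.
width = N // s
split_convs = nn.ModuleList(
    nn.Conv1d(width, width, 3, padding=1) for _ in range(s - 1))
print(f"woDR split convs: {n_params(split_convs) / 1e3:.0f}k")   # ~49k
```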
We further evaluate the performance of Res2Net-woDR with different scales $s$ (depicted as the green line in Fig. 3). The best performance is observed with $s = 64$, achieving a mean EER of $2.97\%$, which surpasses the best Res2Net baseline. Increasing $s$ beyond this point leads to a decline in performance, likely due to the following factors:

- Feature Dilution. A large $s$ excessively fragments feature representations, weakening their expressiveness and resulting in diluted, less informative features [18].
- Redundant Transformations. An overly large $s$ introduces unnecessary feature transformations, leading to overfitting and reduced generalization [43].
- Restricted Feature Interaction. Since channels are unordered, distant groups may still contain correlated information. In this case, the additional convolutional layers introduced by splitting limit their interactions, weakening the model's ability to capture complex patterns.

Based on the optimal $s$, we increase the number of blocks $b$ to deepen the model (depicted as the light pink line in Fig. 3). However, no further performance improvement is observed. This could be attributed to the deeper architecture's limited ability to effectively utilize the additional parameters, resulting in diminishing performance gains. It may also increase the risk of overfitting.

The Novel Nested Design: The prior experiments demonstrate that removing the DR layer enhances the performance of Res2Net. We believe that directly extracting information from high-dimensional speech foundation model features avoids the information loss introduced by DR. Our experiments with variations in scale, depth, and dimensionality show that a mean EER of $2.97\%$ marks a performance bottleneck for this design.

Compared to Res2Net-woDR, the proposed Nes2Net adopts a novel nested design that enhances flexibility and significantly boosts the model's representational capacity. Processing larger feature subsets in the outer layer facilitates better interactions across channels within each nested layer. Furthermore, the integrated local cross-channel attention mechanism enhances feature selection while mitigating redundancy, addressing limitations in prior designs. This architectural refinement overcomes the performance limitations observed in the original Res2Net design. As a result, Nes2Net and its enhanced variant Nes2Net-X surpass the earlier performance bottleneck, achieving mean EERs of $2.55\%$ and $2.51\%$, respectively.

# C. Studies on the ASVspoof 2021 dataset

TABLE V
PERFORMANCE IN EER (%) ON ASVSPOOF 2021 LA AND DF. THE RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN)'. CKPT AVG. REFERS TO THE NUMBER OF CHECKPOINTS AVERAGED. ‡ DENOTES RE-IMPLEMENTATION CONDUCTED BY US. 'ALGO4' AND 'ALGO5' REPRESENT RAWBOOST SERIES AUGMENTATIONS $(1+2+3)$ AND $(1+2)$ [55], RESPECTIVELY. PARAMETER COUNTS THAT ARE UNDERLINED ARE CALCULATED BY US. '-' REPRESENTS UNKNOWN. N/A INDICATES THAT THE SYSTEM DOES NOT USE THE AVERAGED-CHECKPOINTS METHOD.
| Remark | Front-end | Back-end Model | Back-end Params. | CKPT Avg. | 2021 LA | 2021 DF |
| --- | --- | --- | --- | --- | --- | --- |
| 2022 | wav2vec2.0 | FIR-NB [61] | - | - | 3.54 | 6.18 |
| 2022 | wav2vec2.0 | FIR-WB [61] | - | - | 7.08 | 4.98 |
| 2022 | wav2vec2.0 | LGF [62] | - | - | 9.66 | 4.75 |
| 2023 | wav2vec2.0 | Conformer (fix) [63]$^5$ | 2,506k | 5 | 1.38 | 2.27 |
| 2023 | wav2vec2.0 | Conformer (var) [63] | 2,506k | 5 | 0.87 | 7.36 |
| 2024 | wav2vec2.0 | Ensembling [64] ‡ | - | - | 2.32 (4.48) | 5.60 (8.74) |
| 2024 | WavLM | ASP+MLP [65] | 1,051k | - | 3.31 | 4.47 |
| 2024 | wav2vec2.0 | SLIM [14] | - | - | - | (4.4) |
| 2024 | WavLM | AttM-LSTM [31]$^6$ | 936k | N/A | 3.50 | 3.19 |
| 2024 | wav2vec2.0 | FTDKD [66] | - | - | 2.96 | 2.82 |
| 2024 | wav2vec2.0 | AASIST2 [67] | - | - | 1.61 | 2.77 |
| 2024 | wav2vec2.0 | MFA [68] | - | - | 5.08 | 2.56 |
| 2024 | wav2vec2.0 | MoE [69] | - | - | 2.96 | 2.54 |
| 2024 | wav2vec2.0 | OCKD [70] | - | - | 0.90 | 2.27 |
| 2024 | wav2vec2.0 | TCM [33]$^7$ | 2,383k | 5 | 1.03 | 2.06 |
| 2024 | wav2vec2.0 | SLS [35]$^8$ | 23,399k | - | 2.87 (3.88) | 1.92 (2.09) |
| 2025 | wav2vec2.0 | LSR+LSA [71] | - | - | 1.19 | 2.43 |
| 2025 | wav2vec2.0 | LSR+LSA [71] ※ | - | - | 1.05 | 1.86 |
| 2025 | wav2vec2.0 | WaveSpec [72] | - | - | - | 1.90 |
| 2025 | wav2vec2.0 | Mamba [17]$^9$ | 1,937k | 5 | 0.93 | 1.88 |
| 2025 | wav2vec2.0 | SSL-EOW-S. [73] ‡ | - | - | - | 1.75 (2.91) |
| 2025 | wav2vec2.0 | Cal. Ensemble [73] ‡ | - | - | - | (2.03) |
| 2022 | wav2vec2.0 | AASIST [15]$^{10}$ | 447k | N/A | 0.82 (1.00) | 2.85 (3.69) |
| Ours | wav2vec2.0 | AASIST (algo4) | 447k | N/A | 1.13 (1.36) | 3.37 (4.09) |
| Ours | wav2vec2.0 | AASIST (algo5) | 447k | N/A | 0.93 (1.40) | 3.56 (5.07) |
| Ours | wav2vec2.0 | Nes2Net | 511k | N/A | 1.61 (1.90) | 1.89 (2.12) |
| Ours | wav2vec2.0 | Nes2Net-X | 511k | N/A | 1.73 (1.95) | 1.65 (1.91) |
| Ours | wav2vec2.0 | Nes2Net-X | 511k | 3 | 1.66 (1.87) | 1.54 (1.98) |
| Ours | wav2vec2.0 | Nes2Net-X | 511k | 5 | 1.88 (2.00) | 1.49 (1.78) |
※: with extra data augmentation [71]. ‡: ensemble of multiple models.

The ASVspoof series datasets are widely used as benchmarks for advancing research in detecting spoofed speech [47], [48]. Following the standard protocol, we train models on ASVspoof 2019 [47] and evaluate them on the ASVspoof 2021 Logical Access (LA) and Deepfake (DF) tasks [48]. The LA task focuses on detecting synthetic and voice-converted speech transmitted over telephony systems, introducing challenges related to channel effects and transmission variability. In contrast, the DF task targets detecting manipulated, compressed speech data commonly found on online platforms. This reflects real-world scenarios where deepfake audio circulates, making the DF task a valuable benchmark for evaluating deepfake detection systems.

The results in Table V show that for the LA track, our Nes2Net achieves a mean EER of $1.90\%$, comparable to SOTA systems. For the DF track, which more closely reflects

$^{5}$ https://github.com/ErosRos/conformer-based-classifier-for-anti-spoofing
$^{6}$ https://github.com/pandartialdTJU/AttM_INTERSPEECH24
$^{7}$ https://github.com/ductuantruong/tcm_add
$^{8}$ https://github.com/QiShanZhang/SLSforASVspoof-2021-DF
$^{9}$ https://github.com/swagshaw/XLSR-Mamba
$^{10}$ https://github.com/TakHemlata/SSL_Anti-spoofing

TABLE VI
PERFORMANCE IN EER (%) FOR DIFFERENT TYPES OF VOCODERS AND COMPRESSION CONDITIONS ON THE ASVSPOOF 2021 DF TEST SET. THE FIVE EER VALUES FOR EACH SUB-ITEM, FROM LEFT TO RIGHT, CORRESPOND TO NES2NET-X, MAMBA [17], SLS [35], TCM [33], AND AASIST [15]. THE BEST PERFORMANCE IS REPORTED IN BOLD FONTS, AND THE SECOND-BEST IS UNDERLINED.
| Condition | Traditional Vocoder | Wav Concatenation | Neural Autoreg. | Neural Non-autoreg. | Unknown | Pooled EER |
| --- | --- | --- | --- | --- | --- | --- |
| C1 – | 0.36/0.78/1.21/0.95/1.22 | 0.76/0.76/0.80/0.76/2.28 | 2.70/3.88/3.12/3.89/3.45 | 0.52/0.87/0.68/0.95/1.56 | 1.64/1.63/1.23/1.73/1.99 | 1.47/1.89/1.72/2.23/2.34 |
| C2 Low mp3 | 1.48/0.94/1.94/1.67/2.72 | 2.96/2.20/2.16/2.56/5.84 | 2.89/3.23/2.71/3.59/5.96 | 1.23/0.86/0.78/1.32/3.33 | 2.54/1.69/1.65/1.93/4.30 | 1.75/1.84/2.02/2.11/4.30 |
| C3 High mp3 | 0.44/0.88/1.39/0.96/1.83 | 1.13/1.49/1.17/1.45/3.35 | 2.47/3.35/2.91/3.70/3.79 | 0.44/0.87/0.69/0.88/2.02 | 2.29/1.85/1.34/1.67/2.65 | 1.32/1.85/1.59/1.95/2.64 |
| C4 Low m4a | 0.44/0.95/1.48/1.22/1.57 | 1.15/0.85/1.24/1.67/2.09 | 2.79/3.39/2.79/3.40/3.75 | 0.54/0.96/0.70/1.22/1.65 | 1.32/1.22/1.14/1.41/2.10 | 1.40/1.92/1.74/2.01/2.37 |
| C5 High m4a | 0.45/0.80/1.34/0.98/1.16 | 0.62/0.76/0.71/0.76/2.10 | 2.77/3.48/2.96/3.73/3.39 | 0.56/0.90/0.64/1.07/1.34 | 1.88/1.70/1.34/1.43/1.87 | 1.59/2.05/1.79/1.96/2.14 |
| C6 Low ogg | 0.69/1.13/2.14/1.44/2.35 | 0.80/0.97/0.91/0.91/2.23 | 1.92/2.80/2.44/2.79/3.67 | 0.48/0.78/0.61/0.84/1.62 | 1.05/1.14/1.00/1.01/2.23 | 1.09/1.61/1.88/1.87/2.58 |
| C7 High ogg | 0.70/1.13/1.52/1.35/1.57 | 0.62/0.80/0.71/0.80/1.50 | 2.05/2.84/2.26/2.66/2.92 | 0.43/0.65/0.52/0.74/1.00 | 1.34/1.05/0.96/0.96/1.27 | 1.35/1.61/1.57/1.74/1.92 |
| C8 mp3→m4a | 0.95/1.26/2.28/1.74/3.01 | 1.52/0.97/1.08/1.08/2.96 | 2.22/3.01/2.31/2.96/4.49 | 0.61/0.57/0.65/0.95/2.05 | 1.61/1.18/1.09/1.18/2.66 | 1.48/1.65/1.92/1.97/3.31 |
| C9 ogg→m4a | 0.70/1.26/2.15/1.49/2.28 | 0.88/0.97/0.99/0.88/2.52 | 1.92/3.01/2.57/2.88/3.76 | 0.52/0.70/0.65/0.78/1.57 | 0.96/1.09/1.09/1.05/2.14 | 1.13/1.79/2.04/1.88/2.75 |
| Pooled EER | 0.72/1.14/1.88/1.40/2.15 | 1.10/1.05/1.07/1.14/2.85 | 2.70/3.32/2.86/3.40/4.05 | 0.63/0.80/0.69/0.94/1.84 | 1.86/1.43/1.23/1.38/2.45 | 1.49/1.88/1.92/2.06/2.85 |
Fig. 4. Visualization of the EER (%) across various vocoders and compression conditions on the ASVspoof 2021 DF test set. Each EER value is shown as a colored circle, where the size indicates the EER value, and the color represents the performance ranking among the five models: blue (best) to light red (worst). The five EER values for each sub-item, from left to right, correspond to the proposed Nes2Net-X, Mamba [17], SLS [35], TCM [33], and AASIST [15].

real-world scenarios as discussed earlier, the baseline system AASIST [15] achieves its best EER of $2.85\%$ and a mean EER of $3.69\%$, remaining competitive with current SOTA systems. The SLS [35] and TCM [33] models achieve EERs close to $2\%$, demonstrating strong performance at the SOTA level. The Mamba-based [17] model further improves the results, reducing the EER to $1.88\%$. Notably, our proposed Nes2Net attains its best EER of $1.89\%$ and a mean EER of $2.12\%$, comparable to the performance of current SOTA systems. The enhanced variant, Nes2Net-X, achieves the best performance among all compared systems, with a best EER of $1.65\%$ and a mean EER of $1.91\%$.

Inspired by prior works [17], [33], we average the weights of several top-performing checkpoints on the validation set to obtain an improved model. This approach further improves the performance on the DF task to a best EER of $1.49\%$ and a mean EER of $1.78\%$, which, to the best of our knowledge, is the best performance reported to date. Furthermore, compared to Mamba [17], our model achieves this performance with approximately $74\%$ fewer parameters, demonstrating superior efficiency.
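Checkpoint weight averaging itself is straightforward: the parameters of the $k$ best checkpoints selected on the validation set are averaged element-wise, and the resulting model is evaluated as usual. A minimal sketch, with a toy model standing in for the full system:

```python
import copy
import torch

model = torch.nn.Linear(1024, 2)                 # toy stand-in for the system
ckpts = [copy.deepcopy(model.state_dict()) for _ in range(3)]  # k best ckpts

avg = {name: torch.stack([sd[name].float() for sd in ckpts]).mean(dim=0)
       for name in ckpts[0]}

model.load_state_dict(avg)        # the averaged model is then tested as usual
```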
The analysis above summarizes overall performance on the DF test set. The DF dataset also provides detailed labels for vocoder types and compression conditions, enabling more fine-grained analysis. To further evaluate performance, we compare the SOTA models Mamba, SLS, TCM, and AASIST with our proposed Nes2Net-X across these sub-tracks. The results are presented in Table VI. To improve readability and make the extensive numerical data easier to interpret, we also visualize the table's results in Fig. 4.

For traditional vocoders, all models perform well, with most EERs below $2\%$. Notably, our proposed Nes2Net-X achieves exceptional results, consistently yielding EERs under $1\%$ across all conditions except C2. This demonstrates the strong stability of Nes2Net-X when handling unseen and relatively simple scenarios. In contrast, for neural autoregressive vocoders, all models experience a noticeable drop in performance, with EERs reaching up to $5.96\%$. This indicates the greater challenge posed by the sequential and dynamic nature of autoregressive vocoders, which introduce higher variability in synthesis. Nevertheless, Nes2Net-X maintains a clear advantage over the competing models, demonstrating its robustness in handling these complex synthesis conditions.

From the perspective of compression conditions, the differences in model performance are less pronounced compared to the variations observed across vocoder types. Nes2Net-X consistently achieves the lowest EERs across all compression conditions, regardless of the level of distortion introduced by compression. This consistency highlights the model's strong generalization ability across different levels of compression.

Overall, these findings demonstrate that Nes2Net-X is not only highly effective across diverse vocoder types, but also maintains superior performance under varying compression conditions. This robustness underscores the model's capability to handle both compression diversity and complex synthesis challenges, making it a reliable solution for deepfake audio detection across a wide range of scenarios.

# D. The results on the In-the-Wild dataset

TABLE VII
PERFORMANCE IN EER (%) ON THE IN-THE-WILD [50] DATASET. OUR RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS.
| Front-end | Year | Back-end | EER |
| --- | --- | --- | --- |
| wav2vec 2.0 | 2022 | RawNet & AASIST (reported by [35]) | 10.46 |
| | 2024 | SLIM [14] | - (12.5) |
| | 2024 | MoE [69] | 9.17 |
| | 2024 | Conformer [63] | 8.42 |
| | 2024 | TCM [33] | 7.79 |
| | 2024 | OCKD [70] | 7.68 |
| | 2024 | SLS [35] | 7.46 (8.87) |
| | 2024 | Pascu et al. [74] | - (7.2) |
| | 2025 | Mamba [17] | 6.71 |
| | 2025 | WaveSpec [72] | 6.58 |
| | 2025 | LSR+LSA [71] | 5.92 |
| | 2025 | LSR+LSA [71] ※ | 5.54 |
| | - | Proposed Nes2Net | 5.80 (7.06) |
| | - | Proposed Nes2Net-X | 5.52 (6.60) |
※: with extra data augmentation [71]

The In-the-Wild dataset [50] is a collection of deepfake videos sourced from the internet. Unlike controlled datasets, it captures the diverse and unpredictable nature of real-world scenarios. This diversity is essential for developing and evaluating deepfake detection models, as it challenges them to generalize effectively across a wide range of conditions.

In addition, unlike many other datasets that rely on self-generated fake audio, this dataset is collected from publicly available video and audio files explicitly labeled as audio deepfakes [50]. To account for the potential presence of partial spoofing, we evaluate our proposed Nes2Net and Nes2Net-X using the entire duration of each test sample instead of restricting it to the first 4 seconds, as the latter approach risks missing partially spoofed segments.

The testing results, alongside SOTA models, are reported in Table VII. We find that the overall performance trends are consistent with those seen on the ASVspoof 2021 DF dataset. However, EERs on the In-the-Wild dataset are generally higher than those on the DF dataset, reflecting greater complexity and variability in real-world scenarios. Notably, the proposed Nes2Net-X outperforms all SOTA models, achieving the lowest EER of $5.52\%$ and a mean EER of $6.60\%$ on this challenging dataset.

# E. The results on the ASVspoof 5 dataset

The ASVspoof 5 dataset represents the most recent edition in the ASVspoof series. Unlike earlier versions, it introduces

TABLE VIII
A COMPARISON BETWEEN THE PROPOSED NES2NET AND THE AASIST BASELINE SYSTEM ON THE ASVSPOOF 5 DATASET [49]. 'PARAMS.' AND 'MMACS' REFER TO THE NUMBER OF PARAMETERS AND THE NUMBER OF MILLION MULTIPLY-ACCUMULATE OPERATIONS, RESPECTIVELY. 'AVG.' INDICATES THE AVERAGE RELATIVE PERFORMANCE IMPROVEMENT ACROSS ALL THREE EVALUATION METRICS.
| Model | Params.↓ | MMACs↓ | CLLR↓ | minDCF↓ | EER↓ | Avg. |
| --- | --- | --- | --- | --- | --- | --- |
| AASIST | 447k | 707.65 | 0.9587 | 0.1645 | 6.08 | Benchmark |
| Nes2Net | 511k | 58.11 | 0.7912 | 0.1568 | 6.13 | 7.1% |
| Nes2Net-X | 511k | 91.35 | 0.7344 | 0.1535 | 5.92 | 10.9% |
adversarial attacks and is crowdsourced under various acoustic conditions [49]. As it is newly released, there are currently no existing systems available for a fair comparison. Therefore, we re-implement the AASIST system as a baseline and compare it with our proposed Nes2Net and Nes2Net-X models. Following the ASVspoof 5 challenge guidelines [49], we use WavLM [3] as the front-end. Based on the evaluation protocol in [37], we assess performance using three metrics: the Cost of Log-Likelihood Ratio (CLLR), the minimum Detection Cost Function (minDCF), and EER, and present the results in Table VIII. We observe that the Nes2Net and Nes2Net-X back-end models result in only a slight increase in the number of parameters compared to AASIST, while significantly reducing the MMACs. Moreover, across all three evaluation metrics, the Nes2Net and Nes2Net-X back-ends improve performance by $7.1\%$ and $10.9\%$, respectively.

# F. The results on the PartialSpoof dataset

TABLE IX
PERFORMANCE IN EER (%) ON THE PARTIALSPOOF [51] DATASET. THE RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS. † INDICATES RESULTS OBTAINED FROM OUR IMPLEMENTATION.
| Front-end | Year | Back-end | Dev | Eval |
| --- | --- | --- | --- | --- |
| wav2vec 2.0 | 2024 | gMLP [51] | 0.35 | 0.64 |
| | - | gMLP † | 0.39 (0.43) | 0.72 (0.80) |
| | 2024 | 1D Res2Net [59] | 0.35 | 0.73 |
| | - | 1D Res2Net † | 0.35 (0.38) | 0.73 (0.79) |
| | - | SE ResNet † | 0.31 (0.50) | 0.77 (0.78) |
| | - | Nes2Net | 0.24 (0.36) | 0.53 (0.68) |
| | - | Nes2Net-X | 0.20 (0.33) | 0.57 (0.64) |
Partially manipulating a sentence can significantly alter its intended meaning [59]. When such manipulations occur in small regions, existing models trained on fully spoofed speech and relying on pooling functions struggle to detect these subtle changes. Consequently, there is growing interest in the detection of partially spoofed speech [51], [59], [75].

To evaluate the performance of our proposed model across different spoofing tasks, we conduct experiments on the PartialSpoof dataset [51]. The results are presented in Table IX. First, we reproduce the performance of two SOTA models, achieving results comparable to those reported in their original papers [51], [59]. Additionally, we evaluate SE ResNet, which demonstrates performance similar to the other baselines. In

TABLE X
PERFORMANCE IN EER (%) ON THE ASVSPOOF 2021 LA, DF [48], AND IN-THE-WILD [50] DATASETS. THE RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS. 'W/ AUG.' AND 'W/O AUG.' INDICATE WHETHER EVALUATION WITH AUGMENTATIONS ON THE VALIDATION SET IS USED TO SELECT THE BEST CHECKPOINT FOR TESTING. CKPT AVG. REFERS TO THE NUMBER OF CHECKPOINTS AVERAGED.
| Back-end | Train Set | CKPT Avg. | w/ Aug.: 21LA [48] | w/ Aug.: 21DF [48] | w/ Aug.: In-the-Wild [50] | w/o Aug.: 21LA [48] | w/o Aug.: 21DF [48] | w/o Aug.: In-the-Wild [50] |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Nes2Net-X | ASVspoof 19 [47] | N/A | 1.63 (1.79) | 1.84 (2.03) | 5.56 (6.61) | 1.73 (1.95) | 1.65 (1.91) | 5.73 (6.83) |
| | | 3 | 1.70 (1.80) | 1.88 (1.98) | 5.15 (6.31) | 1.66 (1.87) | 1.54 (1.98) | 5.59 (6.90) |
| | | 5 | 1.67 (1.78) | 1.80 (1.91) | 5.28 (6.31) | 1.88 (2.00) | 1.49 (1.78) | 5.52 (6.60) |
contrast, our proposed Nes2Net and Nes2Net-X outperform all three baselines.

# G. Empirical Runtime and Memory Analysis

The number of parameters and MMACs are widely adopted metrics for evaluating model efficiency. These platform-independent measures offer consistent and fair comparisons across different hardware. However, to better reflect the real-world deployment costs of back-end architectures, we additionally benchmark their training time, inference time, and peak GPU memory usage, as summarized in Table XI.

TABLE XI
TRAINING AND INFERENCE EFFICIENCY COMPARISON ACROSS BACK-END MODELS. THE TABLE REPORTS THE AVERAGE (AVG.) TRAINING AND INFERENCE TIME PER BATCH IN MILLISECONDS (MS/BATCH), AS WELL AS PEAK GPU MEMORY USAGE IN MEGABYTES (MB).
| Back-end | Avg. Training Time (ms/batch)↓ | Avg. Inference Time (ms/batch)↓ | Peak GPU Memory (MB)↓ |
| --- | --- | --- | --- |
| AASIST Light (C=24) | 27.0 | 7.8 | 1,327 |
| AASIST Standard (C=32) | 53.8 | 18.7 | 3,454 |
| AASIST Large (C=40) | 79.2 | 28.1 | 4,273 |
| AASIST XL (C=48) | 86.1 | 30.7 | 5,087 |
| AASIST XXL (C=56) | 100.9 | 37.4 | 5,905 |
| ResNet | 7.8 | 2.6 | 691 |
| Res2Net | 15.6 | 3.5 | 721 |
| ECAPA-TDNN (C=128) | 9.4 | 3.1 | 698 |
| Proposed Nes2Net | 20.2 | 4.9 | 1,312 |
| Proposed Nes2Net-X | 29.1 | 9.2 | 2,231 |
All back-end models are evaluated under identical conditions: input features of 400 frames with 1024 dimensions, a batch size of 64, and execution on a dedicated NVIDIA H20 GPU. The first 10 batches are used for warm-up and excluded from the measurement, and the inference and training times are averaged over the subsequent 200 batches. Training time includes the forward, backward, and optimizer update steps.
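A sketch of this measurement protocol is given below, with a toy convolution standing in for the back-end under test; a CUDA device is assumed, and a stand-in loss replaces the real training objective. Inference timing (forward only, under `torch.no_grad()`) follows the same warm-up-then-average pattern and is omitted for brevity.

```python
import time
import torch

def benchmark(backend, device="cuda"):
    """Time training steps and record peak GPU memory, per the protocol:
    batch 64 of 400 frames x 1024 dims, 10 warm-up batches, 200 measured."""
    backend = backend.to(device).train()
    opt = torch.optim.AdamW(backend.parameters())
    x = torch.randn(64, 1024, 400, device=device)
    torch.cuda.reset_peak_memory_stats(device)

    def step():
        opt.zero_grad()
        backend(x).mean().backward()   # forward + backward with stand-in loss
        opt.step()                     # optimizer update

    for _ in range(10):                # warm-up, excluded from the measurement
        step()
    torch.cuda.synchronize(device)
    t0 = time.perf_counter()
    for _ in range(200):
        step()
    torch.cuda.synchronize(device)
    ms = (time.perf_counter() - t0) / 200 * 1e3
    peak_mb = torch.cuda.max_memory_allocated(device) / 2**20
    print(f"train: {ms:.1f} ms/batch, peak GPU memory: {peak_mb:.0f} MB")

benchmark(torch.nn.Conv1d(1024, 1024, 3, padding=1))   # toy stand-in back-end
```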
+ +Nes2Net incorporates a novel nested multi-scale design that enables more effective feature extraction and deeper cross-channel interactions without increasing model complexity. + +The improved Nes2Net-X further strengthens representation learning by introducing learnable weighted feature fusion, offering adaptive control over the feature aggregation process. + +We conduct extensive evaluations across five representative datasets: CtrSVDD, ASVspoof 2021, ASVspoof 5, Partial-Spoof, and In-the-Wild, covering a wide range of singing voice deepfakes, fully spoofed speech, adversarial attacks, real-world deepfakes, and partially spoofed speech. Across all scenarios, our models achieve SOTA performance, demonstrating superior generalization, compactness, and resilience under unseen and challenging conditions. + +In summary, Nes2Net and Nes2Net-X offer a general-purpose, resource-efficient back-end for foundation model-based speech anti-spoofing, providing a practical yet powerful alternative to DR-dependent designs. To facilitate future research and applications, we make all source code and pretrained models publicly available. + +# REFERENCES + +[1] A. Baevski, Y. Zhou, A. Mohamed, and M. Auli, "wav2vec 2.0: A framework for self-supervised learning of speech representations," in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 33, 2020, pp. 12449-12460. +[2] W.-N. Hsu, B. Bolte, Y.-H. H. Tsai, K. Lakhotia, R. Salakhutdinov, and A. Mohamed, "HuBERT: Self-supervised speech representation learning by masked prediction of hidden units," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 29, pp. 3451-3460, 2021. +[3] S. Chen, C. Wang, Z. Chen, Y. Wu, S. Liu, Z. Chen, J. Li, N. Kanda, T. Yoshioka, X. Xiao, J. Wu, L. Zhou, S. Ren, Y. Qian, Y. Qian, J. Wu, M. Zeng, X. Yu, and F. Wei, "WavLM: Large-scale self-supervised pre-training for full stack speech processing," IEEE J. Sel. Top. Signal Process., vol. 16, no. 6, pp. 1505-1518, 2022. +[4] A. T. Liu, S.-W. Li, and H.-y. Lee, “TERA: Self-supervised learning of transformer encoder representation for speech,” IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 29, pp. 2351-2366, 2021. +[5] J. Zhao and W.-Q. Zhang, "Improving automatic speech recognition performance for low-resource languages with self-supervised models," IEEE J. Sel. Top. Signal Process., vol. 16, no. 6, pp. 1227-1241, 2022. +[6] J. weon Jung, W. Zhang, J. Shi, Z. Aldeneh, T. Higuchi, A. Gichamba, B.-J. Theobald, A. Hussen Abdelaziz, and S. Watanabe, "ESPnet-SPK: full pipeline speaker embedding toolkit with reproducible recipes, self-supervised front-ends, and off-the-shelf models," in Proc. INTERSPEECH, 2024, pp. 4278-4282. +[7] M. Li, Y. Ahmadiadli, and X.-P. Zhang, "A survey on speech deepfake detection," ACM Comput. Surv., vol. 57, no. 7, 2025. +[8] N. M. Müller, P. Kawa, W. H. Choong, E. Casanova, E. Gölle, T. Müller, P. Syga, P. Sperl, and K. Böttinger, "MLAAD: The multi-language audio anti-spoofing dataset," in Proc. Int. Jt. Conf. Neural Netw. (IJCNN), 2024, pp. 1-7. +[9] Y. Xie, Y. Lu, R. Fu, Z. Wen, Z. Wang, J. Tao, X. Qi, X. Wang, Y. Liu, H. Cheng, L. Ye, and Y. Sun, "The codecfake dataset and countermeasures for the universally detection of deepfake audio," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 33, pp. 386-400, 2025. +[10] R. K. Das, X. Tian, T. Kinnunen, and H. Li, “The attacker's perspective on automatic speaker verification: An overview,” in Proc. INTERSPEECH, 2020, pp. 4213–4217. +[11] J.-w. Jung, Y. Wu, X. Wang, J.-H. Kim, S. Maiti, Y. 
Matsunaga, H.-j. Shim, J. Tian, N. Evans, J. S. Chung, W. Zhang, S. Um, S. Takamichi, and S. Watanabe, "SpoofCeleb: Speech deepfake detection and SASV in the wild," IEEE Open J. Signal Process., vol. 6, pp. 68-77, 2025. +[12] J. Du, X. Chen, H. Wu, L. Zhang, I. Lin, I. Chiu, W. Ren, Y. Tseng, Y. Tsao, J.-S. R. Jang et al., "CodecFake-Omni: A large-scale codec-based deepfake speech dataset," arXiv preprint arXiv:2501.08238, 2025. +[13] X. Chen, H. Wu, R. Jang, and H. yi Lee, "Singing voice graph modeling for singfake detection," in Proc. INTERSPEECH, 2024, pp. 4843-4847. +[14] Y. Zhu, S. Koppisetti, T. Tran, and G. Bharaj, "SLIM: Style-linguistics mismatch model for generalized audio deepfake detection," in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 37, 2024, pp. 67901-67928. +[15] H. Tak, M. Todisco, X. Wang, J. weon Jung, J. Yamagishi, and N. Evans, "Automatic speaker verification spoofing and deepfake detection using wav2vec 2.0 and data augmentation," in Proc. Odyssey Speaker Lang. Recognit. Workshop, 2022, pp. 112-119. + +[16] A. Guragain, T. Liu, Z. Pan, H. B. Sailor, and Q. Wang, "Speech foundation model ensembles for the controlled singing voice deepfake detection (CtrSVDD) challenge 2024," in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024. +[17] Y. Xiao and R. K. Das, "XLSR-Mamba: A dual-column bidirectional state space model for spoofing attack detection," IEEE Signal Process Lett., vol. 32, pp. 1276-1280, 2025. +[18] S.-H. Gao, M.-M. Cheng, K. Zhao, X.-Y. Zhang, M.-H. Yang, and P. Torr, “Res2Net: A new multi-scale backbone architecture,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 43, no. 2, pp. 652-662, 2021. +[19] K. He, X. Zhang, S. Ren, and J. Sun, "Deep residual learning for image recognition," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2016, pp. 770-778. +[20] Y. Chen, S. Zheng, H. Wang, L. Cheng, Q. Chen, and J. Qi, "An enhanced Res2Net with local and global feature fusion for speaker verification," in Proc. INTERSPEECH, 2023, pp. 2228-2232. +[21] Y. Chen, S. Zheng, H. Wang, L. Cheng, Q. Chen, S. Zhang, and J. Li, "ERes2NetV2: Boosting short-duration speaker verification performance with computational efficiency," in Proc. INTERSPEECH, 2024, pp. 3245-3249. +[22] T. Liu, K. A. Lee, Q. Wang, and H. Li, "Golden Gemini is all you need: Finding the sweet spots for speaker verification," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 2324-2337, 2024. +[23] X. Li, X. Wu, H. Lu, X. Liu, and H. Meng, "Channel-wise gated Res2Net: Towards robust detection of synthetic speech attacks," in Proc. INTERSPEECH, 2021, pp. 4314-4318. +[24] J. Kim and S. M. Ban, "Phase-aware spoof speech detection based on Res2Net with phase network," in Proc. ICASSP, 2023, pp. 1-5. +[25] T. Liu, I. Kukanov, Z. Pan, Q. Wang, H. B. Sailor, and K. A. Lee, "Towards quantifying and reducing language mismatch effects in cross-lingual speech anti-spoofing," in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 1185-1192. +[26] J.-w. Jung, H.-S. Heo, H. Tak, H.-j. Shim, J. S. Chung, B.-J. Lee, H.-J. Yu, and N. Evans, "AASIST: Audio anti-spoofing using integrated spectro-temporal graph attention networks," in Proc. ICASSP, 2022, pp. 6367-6371. +[27] Y. Chen, J. Yi, J. Xue, C. Wang, X. Zhang, S. Dong, S. Zeng, J. Tao, Z. Lv, and C. Fan, "RawBMamba: End-to-end bidirectional state space model for audio deepfake detection," in Proc. INTERSPEECH, 2024, pp. 2720-2724. +[28] Y. Chen, H. Wu, N. Jiang, X. Xia, Q. Gu, Y. Hao, P. Cai, Y. Guan, J. Wang, W. 
Xie et al., "USTC-KXDIGIT system description for ASVspoof5 challenge," arXiv preprint arXiv:2409.01695, 2024. +[29] Z. Wei, D. Ye, J. Deng, and Y. Lin, "From voices to beats: Enhancing music deepfake detection by identifying forgeries in background," in Proc. ICASSP, 2025, pp. 1-5. +[30] Y. Guan, Y. Ai, Z. Li, S. Peng, and W. Guo, "Recursive feature learning from pre-trained models for spoofing speech detection," in Proc. ICASSP, 2025, pp. 1-5. +[31] Z. Pan, T. Liu, H. B. Sailor, and Q. Wang, "Attentive merging of hidden embeddings from pre-trained speech model for anti-spoofing detection," in Proc. INTERSPEECH, 2024, pp. 2090-2094. +[32] M. Huzaifah, T. Liu, H. B. Sailor, K. M. Tan, T. K. Vangani, Q. Wang, J. H. Wong, N. F. Chen, and A. T. Aw, "Towards a speech foundation model for Singapore and beyond," arXiv preprint arXiv:2412.11538, 2024. +[33] D.-T. Truong, R. Tao, T. Nguyen, H.-T. Luong, K. A. Lee, and E. S. Chng, "Temporal-channel modeling in multi-head self-attention for synthetic speech detection," in Proc. INTERSPEECH, 2024, pp. 537-541. +[34] B. Desplanques, J. Thienpondt, and K. Demuynck, "ECAPA-TDNN: Emphasized channel attention, propagation and aggregation in TDNN based speaker verification," in Proc. INTERSPEECH, 2020, pp. 3830-3834. +[35] Q. Zhang, S. Wen, and T. Hu, "Audio deepfake detection with self-supervised XLS-R and SLS classifier," in Proc. ACM Int. Conf. Multimedia, 2024, pp. 6765-6773. +[36] Z. Ge, X. Xu, H. Guo, Z. Yang, and B. Schuller, "GNCL: A graph neural network with consistency loss for segment-level spoofed speech detection," in Proc. ICASSP, 2025, pp. 1-5. +[37] X. Wang, H. Delgado, H. Tak, J.-w. Jung, H.-j. Shim, M. Todisco, I. Kukanov, X. Liu, M. Sahidullah, T. H. Kinnunen, N. Evans, K. A. Lee, and J. Yamagishi, "ASVspoof 5: Crowdsourced speech data, deepfakes, and adversarial attacks at scale," in Autom. Speaker Verif. Spoofing Countermeas. Workshop, 2024, pp. 1-8. + +[38] Y. Zhang, Y. Zang, J. Shi, R. Yamamoto, T. Toda, and Z. Duan, "SVDD 2024: The inaugural singing voice deepfake detection challenge," in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 782-787. +[39] Q. Zhang, S. Wen, F. Yan, T. Hu, and J. Li, "XWSB: A blend system utilizing XLS-R and WavLM with SLS classifier detection system for SVDD 2024 challenge," in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 788-794. +[40] J. Yi, J. Tao, R. Fu, X. Yan, C. Wang, T. Wang, C. Y. Zhang, X. Zhang, Y. Zhao, Y. Ren et al., "ADD 2023: The second audio deepfake detection challenge," arXiv preprint arXiv:2305.13774, 2023. +[41] J. Hu, L. Shen, and G. Sun, "Squeeze-and-excitation networks," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit. (CVPR), 2018. +[42] K. Okabe, T. Koshinaka, and K. Shinoda, "Attentive statistics pooling for deep speaker embedding," in Proc. INTERSPEECH, 2018, pp. 2252-2256. +[43] T. Zhou, Y. Zhao, and J. Wu, "ResNeXt and Res2Net structures for speaker verification," in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2021, pp. 301-307. +[44] Q. Wang, B. Wu, P. Zhu, P. Li, W. Zuo, and Q. Hu, "ECA-Net: Efficient channel attention for deep convolutional neural networks," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit. (CVPR), 2020, pp. 11531-11539. +[45] T. Liu, R. K. Das, K. A. Lee, and H. Li, "MFA: TDNN with multi-scale frequency-channel attention for text-independent speaker verification with short utterances," in Proc. ICASSP, 2022, pp. 7517-7521. +[46] Y. Zang, J. Shi, Y. Zhang, R. Yamamoto, J. Han, Y. Tang, S. Xu, W. Zhao, J. Guo, T. Toda, and Z.
Duan, "CtrSVDD: A benchmark dataset and baseline analysis for controlled singing voice deepfake detection," in Proc. INTERSPEECH, 2024, pp. 4783-4787. +[47] X. Wang, J. Yamagishi, M. Todisco, H. Delgado, A. Nautsch, N. Evans, M. Sahidullah, V. Vestman, T. Kinnunen, K. A. Lee, L. Juvela, P. Alku, Y.-H. Peng, H.-T. Hwang, Y. Tsao, H.-M. Wang, S. L. Maguer, M. Becker, F. Henderson, R. Clark, Y. Zhang, Q. Wang, Y. Jia, K. Onuma, K. Mushika, T. Kaneda, Y. Jiang, L.-J. Liu, Y.-C. Wu, W.-C. Huang, T. Toda, K. Tanaka, H. Kameoka, I. Steiner, D. Matrouf, J.-F. Bonastre, A. Govender, S. Ronanki, J.-X. Zhang, and Z.-H. Ling, "ASVspoof 2019: A large-scale public database of synthesized, converted and replayed speech," Comput. Speech Lang., vol. 64, p. 101114, 2020. +[48] J. Yamagishi, X. Wang, M. Todisco, M. Sahidullah, J. Patino, A. Nautsch, X. Liu, K. A. Lee, T. Kinnunen, N. Evans, and H. Delgado, "ASVspoof 2021: accelerating progress in spoofed and deepfake speech detection," in Autom. Speaker Verif. Spoofing Countermeas. Challenge, 2021, pp. 47-54. +[49] X. Wang, H. Delgado, H. Tak, J.-w. Jung, H.-j. Shim, M. Todisco, I. Kukanov, X. Liu, M. Sahidullah, T. Kinnunen et al., "ASVspoof 5: Design, collection and validation of resources for spoofing, deepfake, and adversarial attack detection using crowdsourced speech," arXiv preprint arXiv:2502.08857, 2025. +[50] N. M. Müller, P. Czempin, F. Dieckmann, A. Froghyar, and K. Böttinger, "Does audio deepfake detection generalize?" in Proc. INTERSPEECH, 2022, pp. 2783-2787. +[51] L. Zhang, X. Wang, E. Cooper, N. Evans, and J. Yamagishi, "The PartialProof database and countermeasures for the detection of short fake speech segments embedded in an utterance," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 31, pp. 813-825, 2023. +[52] Y. Zang, Y. Zhang, M. Heydari, and Z. Duan, "SingFake: Singing voice deepfake detection," in Proc. ICASSP, 2024, pp. 12156-12160. +[53] Y. Xie, J. Zhou, X. Lu, Z. Jiang, Y. Yang, H. Cheng, and L. Ye, "FSD: An initial chinese dataset for fake song detection," in Proc. ICASSP, 2024, pp. 4605-4609. +[54] T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar, “Focal loss for dense object detection,” in IEEE Int. Conf. Comput. Vis. (ICCV), 2017, pp. 2980–2988. +[55] H. Tak, M. Kamble, J. Patino, M. Todisco, and N. Evans, "Rawboost: A raw data boosting and augmentation method applied to automatic speaker verification anti-spoofing," in Proc. ICASSP, 2022, pp. 6382-6386. + +[56] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” in Int. Conf. Learn. Represent., 2015. +[57] D. Snyder, G. Chen, and D. Povey, “Musan: A music, speech, and noise corpus,” arXiv preprint arXiv:1510.08484, 2015. +[58] T. Ko, V. Peddinti, D. Povey, M. L. Seltzer, and S. Khudanpur, “A study on data augmentation of reverberant speech for robust speech recognition,” in 2017 IEEE international conference on acoustics, speech and signal processing (ICASSP). IEEE, 2017, pp. 5220–5224. +[59] T. Liu, L. Zhang, R. K. Das, Y. Ma, R. Tao, and H. Li, "How do neural spoofing countermeasures detect partially spoofed audio?" in Proc. INTERSPEECH, 2024, pp. 1105-1109. +[60] X. Wang and J. Yamagishi, “A comparative study on recent neural spoofing countermeasures for synthetic speech detection,” in Proc. INTERSPEECH, 2021, pp. 4259–4263. +[61] J. M. Martin-Doñas and A. Álvarez, “The Vicomtech audio deepfake detection system based on wav2vec2 for the 2022 ADD challenge,” in Proc. ICASSP, 2022, pp. 9241–9245. +[62] X. Wang and J. 
Yamagishi, "Investigating self-supervised front ends for speech spoofing countermeasures," in Proc. Odyssey Speaker Lang. Recognit. Workshop, 2022, pp. 100-106. +[63] E. Rosello, A. Gomez-Alanis, A. M. Gomez, and A. Peinado, "A conformer-based classifier for variable-length utterance processing in anti-spoofing," in Proc. INTERSPEECH, 2023, pp. 5281-5285. +[64] E. Rosello, A. M. Gomez, I. López-Espejo, A. M. Peinado, and J. M. Martín-Doñas, "Anti-spoofing ensembling model: Dynamic weight allocation in ensemble models for improved voice biometrics security," in Proc. INTERSPEECH, 2024, pp. 497-501. +[65] H. M. Tran, D. Guennec, P. Martin, A. Sini, D. Lolive, A. Delhay, and P.-F. Marteau, "Spoofed speech detection with a focus on speaker embedding," in Proc. INTERSPEECH, 2024, pp. 2080-2084. +[66] B. Wang, Y. Tang, F. Wei, Z. Ba, and K. Ren, "FTDKD: Frequency-time domain knowledge distillation for low-quality compressed audio deepfake detection," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 4905-4918, 2024. +[67] Y. Zhang, J. Lu, Z. Shang, W. Wang, and P. Zhang, "Improving short utterance anti-spoofing with AASIST2," in Proc. ICASSP, 2024, pp. 11636-11640. +[68] Y. Guo, H. Huang, X. Chen, H. Zhao, and Y. Wang, "Audio deepfake detection with self-supervised WavLM and multi-fusion attentive classifier," in Proc. ICASSP, 2024, pp. 12702-12706. +[69] Z. Wang, R. Fu, Z. Wen, J. Tao, X. Wang, Y. Xie, X. Qi, S. Shi, Y. Lu, Y. Liu et al., "Mixture of experts fusion for fake audio detection using frozen wav2vec 2.0," arXiv preprint arXiv:2409.11909, 2024. +[70] J. Lu, Y. Zhang, W. Wang, Z. Shang, and P. Zhang, "One-class knowledge distillation for spoofing speech detection," in Proc. ICASSP, 2024, pp. 11251-11255. +[71] W. Huang, Y. Gu, Z. Wang, H. Zhu, and Y. Qian, "Generalizable audio deepfake detection via latent space refinement and augmentation," in Proc. ICASSP, 2025, pp. 1-5. +[72] Z. Jin, L. Lang, and B. Leng, "Wave-spectrogram cross-modal aggregation for audio deepfake detection," in Proc. ICASSP, 2025, pp. 1-5. +[73] C. Y. Kwok, D.-T. Truong, and J. Q. Yip, "Robust audio deepfake detection using ensemble confidence calibration," in Proc. ICASSP, 2025, pp. 1-5. +[74] O. Pascu, A. Stan, D. Oneata, E. Oneata, and H. Cucu, "Towards generalisable and calibrated audio deepfake detection with self-supervised representations," in Proc. INTERSPEECH, 2024, pp. 4828-4832. +[75] H.-T. Luong, H. Li, L. Zhang, K. A. Lee, and E. S. Chng, "LlamaPartialSpoof: An LLM-driven fake speech dataset simulating disinformation generation," arXiv preprint arXiv:2409.14743, 2024. +[76] T. Liu, K. A. Lee, Q. Wang, and H. Li, "Disentangling voice and content with self-supervision for speaker recognition," in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 36, 2023, pp. 50221-50236. +[77] S. Wang, Z. Chen, K. A. Lee, Y. Qian, and H. Li, "Overview of speaker modeling and its applications: From the lens of deep speaker representation learning," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 4971-4998, 2024.
\ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05657/images/3cc1916acf788294e6c8ae9cfda94679fa734b07114b7b845c6ca02cdef7c997.jpg b/data/2025/2504_05xxx/2504.05657/images/3cc1916acf788294e6c8ae9cfda94679fa734b07114b7b845c6ca02cdef7c997.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e576b677dc337326ad1fb168b2b6454c91f6d01a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/3cc1916acf788294e6c8ae9cfda94679fa734b07114b7b845c6ca02cdef7c997.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e579240cc1379e46bcf2264a7aa8580bbd27138d941cb36bed19d296cdb1b19e +size 135540 diff --git a/data/2025/2504_05xxx/2504.05657/images/42c4d557aa680975a756269def714eecc57dc1e96b22095e36c26eabbddd4813.jpg b/data/2025/2504_05xxx/2504.05657/images/42c4d557aa680975a756269def714eecc57dc1e96b22095e36c26eabbddd4813.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f26f0778cc86bccb3d64bc0dff3b2f9b99a17dc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/42c4d557aa680975a756269def714eecc57dc1e96b22095e36c26eabbddd4813.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b20712e7439baa6900febd364faf181df46347c40221c1182d1db012b7e775ef +size 7578 diff --git a/data/2025/2504_05xxx/2504.05657/images/5e9d3dac3968c2aca3602c7becebdf97db62a94ae78c7df5ff863f2e6c45e0e0.jpg b/data/2025/2504_05xxx/2504.05657/images/5e9d3dac3968c2aca3602c7becebdf97db62a94ae78c7df5ff863f2e6c45e0e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db918518ee576cce6ec055abf2547e54406c4bf1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/5e9d3dac3968c2aca3602c7becebdf97db62a94ae78c7df5ff863f2e6c45e0e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:229a870a18daf9b875d88620e093858beca439e019f54e7f9bed7354f0d86866 +size 6494 diff --git a/data/2025/2504_05xxx/2504.05657/images/60a1d06954731a1d8497f7ad39edfbb82ed60f30aa0d12cd56c49475a8a4119e.jpg b/data/2025/2504_05xxx/2504.05657/images/60a1d06954731a1d8497f7ad39edfbb82ed60f30aa0d12cd56c49475a8a4119e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e069061227f23e498e19895f51180e95b633f4cd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/60a1d06954731a1d8497f7ad39edfbb82ed60f30aa0d12cd56c49475a8a4119e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b994118425ef54ff1334a96ba99b13a427fe186b553b7746da04d81c71e98efd +size 154826 diff --git a/data/2025/2504_05xxx/2504.05657/images/659129421e45a9fe814172a8f9a34bba84b17527efa124efa6a95150223c258d.jpg b/data/2025/2504_05xxx/2504.05657/images/659129421e45a9fe814172a8f9a34bba84b17527efa124efa6a95150223c258d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f94014f208d3c64ca7d619a885d514f0244bb627 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/659129421e45a9fe814172a8f9a34bba84b17527efa124efa6a95150223c258d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:676aa7269f77783abf1c045f86e38f50e4ad413aff3b48364ef2e15caff7c13c +size 29521 diff --git a/data/2025/2504_05xxx/2504.05657/images/79157a288ac5f97faf334aba645a2a71ff54ca0a6409da7b5db961540f267480.jpg b/data/2025/2504_05xxx/2504.05657/images/79157a288ac5f97faf334aba645a2a71ff54ca0a6409da7b5db961540f267480.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9b0fe074c18fb11fc41d19479b49fa995c51436a --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05657/images/79157a288ac5f97faf334aba645a2a71ff54ca0a6409da7b5db961540f267480.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fdcf80888c531df724ed0e68e98a4571dcbe448b963b270579570db4872a8a9 +size 47557 diff --git a/data/2025/2504_05xxx/2504.05657/images/7e69571a4ff0ffaa47a0d2c05ca2240776de844853e960ce84ab1186458a45c6.jpg b/data/2025/2504_05xxx/2504.05657/images/7e69571a4ff0ffaa47a0d2c05ca2240776de844853e960ce84ab1186458a45c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d22b3601d9df39671a79bf035de9a93e91f8f554 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/7e69571a4ff0ffaa47a0d2c05ca2240776de844853e960ce84ab1186458a45c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d4fd776374eaaa9ecba7e488fbf24a73184f653efb88cdb35af5b09b60fbcb5 +size 5147 diff --git a/data/2025/2504_05xxx/2504.05657/images/8e23480718b64a36e60a649ebac4e58f7fde3d839dc800ff33cb052003b55e25.jpg b/data/2025/2504_05xxx/2504.05657/images/8e23480718b64a36e60a649ebac4e58f7fde3d839dc800ff33cb052003b55e25.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89d63b4b845be7b111d4bd6774ad952de59f3300 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/8e23480718b64a36e60a649ebac4e58f7fde3d839dc800ff33cb052003b55e25.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89440fb5c94323968c9ba51f24cec0182169ce7be43e6ad42d16ee955139ab3c +size 124963 diff --git a/data/2025/2504_05xxx/2504.05657/images/98b5b166adf5a4085d47308265f5b1d43548aef5667c9a311dea779157979ccf.jpg b/data/2025/2504_05xxx/2504.05657/images/98b5b166adf5a4085d47308265f5b1d43548aef5667c9a311dea779157979ccf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b15a2ddc3bb25b86888bcdd4cbba45c3c6495a23 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/98b5b166adf5a4085d47308265f5b1d43548aef5667c9a311dea779157979ccf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:178c440f699a6bfef1df38ec4488ca8c598e4dff5565a1171c4728e38c7158e1 +size 41188 diff --git a/data/2025/2504_05xxx/2504.05657/images/9d42cb0231914a7b71d6b7e6fa59b6b9bb1fdc4444517d6b37eb83903910150a.jpg b/data/2025/2504_05xxx/2504.05657/images/9d42cb0231914a7b71d6b7e6fa59b6b9bb1fdc4444517d6b37eb83903910150a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..703415f8c5baffbfc78bfab46218067571af40e0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/9d42cb0231914a7b71d6b7e6fa59b6b9bb1fdc4444517d6b37eb83903910150a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:745644f40495b8dce155f419ee33272862e5500243063ad9b15f1c1c779da9f6 +size 50173 diff --git a/data/2025/2504_05xxx/2504.05657/images/aaf4389be4b7945f02c8bda6ea039fe9c5e53dd5e8bc867b886ce5c647d07ecd.jpg b/data/2025/2504_05xxx/2504.05657/images/aaf4389be4b7945f02c8bda6ea039fe9c5e53dd5e8bc867b886ce5c647d07ecd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea75d891061ea02931b7df903f999d74ad43e74d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/aaf4389be4b7945f02c8bda6ea039fe9c5e53dd5e8bc867b886ce5c647d07ecd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:812fc6e8c83aafa79cf4d1c99cf3cbbcbf46b3138db02833682e4246c2a57d3e +size 268772 diff --git a/data/2025/2504_05xxx/2504.05657/images/b205df6253934c495cd6bf54f649c643eae158455b8a5a57b5838ccd4e16a70f.jpg 
b/data/2025/2504_05xxx/2504.05657/images/b205df6253934c495cd6bf54f649c643eae158455b8a5a57b5838ccd4e16a70f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb463e4fc8f11ee07b1e2f2e0d48f02e61f3dce5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/b205df6253934c495cd6bf54f649c643eae158455b8a5a57b5838ccd4e16a70f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0e60017eb6e9cb3d237d5f50b9971d17494f41128e9b11366204ed5be2eea68 +size 85388 diff --git a/data/2025/2504_05xxx/2504.05657/images/bb3829ac42d9a686c979bc4e9bb61faab459c4bb9e22a76b5267ae278546f1d8.jpg b/data/2025/2504_05xxx/2504.05657/images/bb3829ac42d9a686c979bc4e9bb61faab459c4bb9e22a76b5267ae278546f1d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc4c5026446412e60cb86f506ff7aa5c4bf8af4a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/bb3829ac42d9a686c979bc4e9bb61faab459c4bb9e22a76b5267ae278546f1d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf6629080a73e1158e112b54cabebf5edc6ce8661fe09a9e136d7c585284c372 +size 146741 diff --git a/data/2025/2504_05xxx/2504.05657/images/bd43c91baeed1799a4c358e11c48f71336db4a8f12d9b56d330407e1dc303c30.jpg b/data/2025/2504_05xxx/2504.05657/images/bd43c91baeed1799a4c358e11c48f71336db4a8f12d9b56d330407e1dc303c30.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40aa2a9846df56f604090330af8d2963d21f4792 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/bd43c91baeed1799a4c358e11c48f71336db4a8f12d9b56d330407e1dc303c30.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ab203c3e7ffab6c644b48fe9e18bd2bea26c5bcffe2ae4419fed3532e0c5a0e +size 50249 diff --git a/data/2025/2504_05xxx/2504.05657/images/cdb077715738a859647716de99f3da764ab98bc0bbef54fc6a8d8d889b7cb8ce.jpg b/data/2025/2504_05xxx/2504.05657/images/cdb077715738a859647716de99f3da764ab98bc0bbef54fc6a8d8d889b7cb8ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad1db570386a89150d1e5be68009b64712497d1f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/cdb077715738a859647716de99f3da764ab98bc0bbef54fc6a8d8d889b7cb8ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:068978e1702fd452bc004e6efa4863b13ee79352088fb52dc072208651aa2762 +size 4228 diff --git a/data/2025/2504_05xxx/2504.05657/images/dabbcf97be8c75c0deb7beb748fdb20e0361404e5c527c4f5e1d38e00b935d08.jpg b/data/2025/2504_05xxx/2504.05657/images/dabbcf97be8c75c0deb7beb748fdb20e0361404e5c527c4f5e1d38e00b935d08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..415e4252673fa4d395af5d88e664af963c94fd97 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/dabbcf97be8c75c0deb7beb748fdb20e0361404e5c527c4f5e1d38e00b935d08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9046db32051985fe2b8c2ceb9ab16778db912622e219f348c33c5d5d7693fda9 +size 30542 diff --git a/data/2025/2504_05xxx/2504.05657/images/efe1584a963bd86fedb0e112a862eb4e48cd69fc72a15bbe69361f939014ea25.jpg b/data/2025/2504_05xxx/2504.05657/images/efe1584a963bd86fedb0e112a862eb4e48cd69fc72a15bbe69361f939014ea25.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4097cca81b71812e6be1d10edcc0751a7464d2c9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/efe1584a963bd86fedb0e112a862eb4e48cd69fc72a15bbe69361f939014ea25.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e137f9d4ab134223096aaf3f5bd669f40cc079d8984935aed915dcb1030cb60d +size 21265 diff --git a/data/2025/2504_05xxx/2504.05657/images/ff815d3d676fe98054c7f2384cec4ecceab1946ec4936157be8fb0c8b58b53f0.jpg b/data/2025/2504_05xxx/2504.05657/images/ff815d3d676fe98054c7f2384cec4ecceab1946ec4936157be8fb0c8b58b53f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3129e7d8d570bc44c832e251962e8cdf1ddde72 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/images/ff815d3d676fe98054c7f2384cec4ecceab1946ec4936157be8fb0c8b58b53f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78e39c8d553e42b33068f9bd46e6ce2f4027ea4ec03230ec09f7f65e6d743cae +size 36022 diff --git a/data/2025/2504_05xxx/2504.05657/layout.json b/data/2025/2504_05xxx/2504.05657/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b08174daff8b645a5b97db17d614f9bb98e433e9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05657/layout.json @@ -0,0 +1,13519 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 56, + 55, + 193, + 66 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 55, + 193, + 66 + ], + "spans": [ + { + "bbox": [ + 56, + 55, + 193, + 66 + ], + "type": "text", + "content": "The current version is 'Preprint'." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 68, + 299, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 68, + 299, + 102 + ], + "spans": [ + { + "bbox": [ + 48, + 68, + 299, + 102 + ], + "type": "text", + "content": "This work has been submitted to the IEEE for possible publication. Copyright may be transferred without notice, after which this version may no longer be accessible." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 103, + 288, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 103, + 288, + 114 + ], + "spans": [ + { + "bbox": [ + 57, + 103, + 288, + 114 + ], + "type": "text", + "content": "This information aligns with the guidelines available at:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 116, + 296, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 116, + 296, + 150 + ], + "spans": [ + { + "bbox": [ + 47, + 116, + 296, + 150 + ], + "type": "text", + "content": "https://journals.ieeethorcenter.ieee.org/become-an-iiie-journal-author/publishing-ethics/guidelines-and-policies/post-publication-policies/" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 15, + 213, + 35, + 577 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 15, + 213, + 35, + 577 + ], + "spans": [ + { + "bbox": [ + 15, + 213, + 35, + 577 + ], + "type": "text", + "content": "arXiv:2504.05657v2 [eess.AS] 26 Oct 2025" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 65, + 56, + 547, + 
111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 56, + 547, + 111 + ], + "spans": [ + { + "bbox": [ + 65, + 56, + 547, + 111 + ], + "type": "text", + "content": "Nes2Net: A Lightweight Nested Architecture for Foundation Model Driven Speech Anti-spoofing" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 117, + 541, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 117, + 541, + 144 + ], + "spans": [ + { + "bbox": [ + 69, + 117, + 541, + 144 + ], + "type": "text", + "content": "Tianchi Liu, Student Member, Duc-Tuan Truong, Student Member, Rohan Kumar Das, Senior Member, Kong Aik Lee, Senior Member, Haizhou Li, Fellow" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 186, + 301, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 186, + 301, + 416 + ], + "spans": [ + { + "bbox": [ + 45, + 186, + 301, + 416 + ], + "type": "text", + "content": "Abstract—Speech foundation models have significantly advanced various speech-related tasks by providing exceptional representation capabilities. However, their high-dimensional output features often create a mismatch with downstream task models, which typically require lower-dimensional inputs. A common solution is to apply a dimensionality reduction (DR) layer, but this approach increases parameter overhead, computational costs, and risks losing valuable information. To address these issues, we propose Nested Res2Net (Nes2Net), a lightweight back-end architecture designed to directly process high-dimensional features without DR layers. The nested structure enhances multi-scale feature extraction, improves feature interaction, and preserves high-dimensional information. We first validate Nes2Net on CtrSVDD, a singing voice deepfake detection dataset, and report a " + }, + { + "bbox": [ + 45, + 186, + 301, + 416 + ], + "type": "inline_equation", + "content": "22\\%" + }, + { + "bbox": [ + 45, + 186, + 301, + 416 + ], + "type": "text", + "content": " performance improvement and an " + }, + { + "bbox": [ + 45, + 186, + 301, + 416 + ], + "type": "inline_equation", + "content": "87\\%" + }, + { + "bbox": [ + 45, + 186, + 301, + 416 + ], + "type": "text", + "content": " back-end computational cost reduction over the state-of-the-art baseline. Additionally, extensive testing across four diverse datasets: ASVspoof 2021, ASVspoof 5, PartialSpoof, and In-the-Wild, covering fully spoofed speech, adversarial attacks, partial spoofing, and real-world scenarios, consistently highlights Nes2Net's superior robustness and generalization capabilities. The code package and pre-trained models are available at https://github.com/Liu-Tianchi/Nes2Net." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 422, + 299, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 422, + 299, + 444 + ], + "spans": [ + { + "bbox": [ + 46, + 422, + 299, + 444 + ], + "type": "text", + "content": "Index Terms—DeepFake detection, speech anti-spoofing, Res2Net, Nes2Net, SSL, speech foundation model" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 462, + 215, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 462, + 215, + 473 + ], + "spans": [ + { + "bbox": [ + 132, + 462, + 215, + 473 + ], + "type": "text", + "content": "I. 
INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 479, + 301, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 479, + 301, + 552 + ], + "spans": [ + { + "bbox": [ + 45, + 479, + 301, + 552 + ], + "type": "text", + "content": "SPEECH foundation models, such as wav2vec 2.0 [1], HuBERT [2], and WavLM [3], have revolutionized speech processing by leveraging large-scale pretraining to capture complex acoustic and linguistic patterns [4]. This has driven notable advances in automatic speech recognition (ASR) [5], speaker verification (SV) [6], and other speech applications." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 552, + 301, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 552, + 301, + 612 + ], + "spans": [ + { + "bbox": [ + 45, + 552, + 301, + 612 + ], + "type": "text", + "content": "Beyond traditional tasks, speech foundation models also show great promise in addressing critical security concerns, particularly speech anti-spoofing (also referred to as deepfake detection) [7]. With the growing sophistication of spoofing techniques, such as voice conversion, ensuring the reliability" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 621, + 301, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 621, + 301, + 648 + ], + "spans": [ + { + "bbox": [ + 45, + 621, + 301, + 648 + ], + "type": "text", + "content": "Tianchi Liu and Haizhou Li are with the Department of Electrical and Computer Engineering, National University of Singapore, Singapore. Tianchi Liu is also with LIGHTSPEED, Singapore (email: tianchi.liu@u.nus.edu);" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 648, + 301, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 648, + 301, + 666 + ], + "spans": [ + { + "bbox": [ + 45, + 648, + 301, + 666 + ], + "type": "text", + "content": "Duc-Tuan Truong is with the Nanyang Technological University, Singapore (email: truongdu001@e.ntu.edu.sg);" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 666, + 301, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 666, + 301, + 685 + ], + "spans": [ + { + "bbox": [ + 45, + 666, + 301, + 685 + ], + "type": "text", + "content": "Rohan Kumar Das is with the Fortemedia Singapore, Singapore (email: ecerohan@gmail.com);" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 684, + 301, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 684, + 301, + 720 + ], + "spans": [ + { + "bbox": [ + 45, + 684, + 301, + 720 + ], + "type": "text", + "content": "Kong Aik Lee is with the Department of Electrical and Electronic Engineering and the Research Centre for Data Science & Artificial Intelligence, The Hong Kong Polytechnic University, Hong Kong (e-mail: kongaik.lee@polyu.edu.hk);" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 720, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 720, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 720, + 301, + 749 + ], + "type": "text", + "content": "Haizhou Li is also with the Shenzhen Research Institute of Big Data, School of Artificial Intelligence, School of Data Science, The Chinese University of Hong Kong, Shenzhen, China (email: haizhouli@cuhk.edu.cn)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 186, + 563, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 563, + 234 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 563, + 234 + ], + "type": "text", + "content": "and security of speech-driven systems has become a pressing concern [8]–[12]. Leveraging the rich representations of these foundation models could significantly improve the robustness and generalization of anti-spoofing systems [13]–[15]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 234, + 564, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 234, + 564, + 460 + ], + "spans": [ + { + "bbox": [ + 307, + 234, + 564, + 460 + ], + "type": "text", + "content": "While speech foundation models offer exceptional representations, their high-dimensional feature outputs present significant challenges for downstream tasks. Downstream models used in tasks like speech anti-spoofing typically require lower-dimensional features [15]–[17]. To address this mismatch, a common approach is to introduce a dimensionality reduction (DR) layer, usually implemented as a fully connected (FC) layer for transforming high-dimensional features into lower-dimensional features. However, this conventional strategy presents notable drawbacks. Given that downstream classifiers are typically compact [15], [16], the DR layer alone often consumes a substantial portion of the parameters and computational resources within the entire back-end model. Moreover, directly projecting high-dimensional features in a one-shot manner through an FC layer leads to the loss of important information, reducing the effectiveness of speech foundation models. These issues highlight the need for a more efficient and effective solution to bridge the dimensionality gap and fully utilize speech foundation models in downstream tasks." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 460, + 564, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 460, + 564, + 581 + ], + "spans": [ + { + "bbox": [ + 307, + 460, + 564, + 581 + ], + "type": "text", + "content": "To address these challenges, we propose Nested Res2Net (Nes2Net) to process high-dimensional features from speech foundation models, eliminating the need for a DR layer while preserving the richness of the original representations. By addressing key limitations of DR layers, such as excessive computational cost and information loss, Nes2Net offers a more efficient and effective solution. This design makes it particularly suitable for tasks requiring a balance of high performance and efficiency, such as speech anti-spoofing. The key contributions of this work can be summarized as follows:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 318, + 581, + 564, + 749 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 318, + 581, + 563, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 581, + 563, + 628 + ], + "spans": [ + { + "bbox": [ + 318, + 581, + 563, + 628 + ], + "type": "text", + "content": "- Novel Architecture: We introduce Nes2Net, a new approach that effectively addresses the limitations of DR layers. Nes2Net retains the expressive power of high-dimensional features while reducing model complexity." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 318, + 629, + 564, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 629, + 564, + 712 + ], + "spans": [ + { + "bbox": [ + 318, + 629, + 564, + 712 + ], + "type": "text", + "content": "- Enhanced Performance, Efficiency, and Generalization: Our method demonstrates a " + }, + { + "bbox": [ + 318, + 629, + 564, + 712 + ], + "type": "inline_equation", + "content": "22\\%" + }, + { + "bbox": [ + 318, + 629, + 564, + 712 + ], + "type": "text", + "content": " performance gain and an " + }, + { + "bbox": [ + 318, + 629, + 564, + 712 + ], + "type": "inline_equation", + "content": "87\\%" + }, + { + "bbox": [ + 318, + 629, + 564, + 712 + ], + "type": "text", + "content": " reduction in computational costs compared to the state-of-the-art baselines on the CtrSVDD dataset. Further experiments conducted on four additional datasets across various scenarios demonstrate strong generalization capability and consistently superior performance." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 318, + 712, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 712, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 318, + 712, + 564, + 749 + ], + "type": "text", + "content": "- Reproducibility: To facilitate further research and application, we make our scripts and pre-trained models publicly available." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 557, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 557, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 557, + 24, + 563, + 32 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 129, + 55, + 218, + 65 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 55, + 218, + 65 + ], + "spans": [ + { + "bbox": [ + 129, + 55, + 218, + 65 + ], + "type": "text", + "content": "II. RELATED WORK" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 70, + 96, + 80 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 70, + 96, + 80 + ], + "spans": [ + { + "bbox": [ + 45, + 70, + 96, + 80 + ], + "type": "text", + "content": "A. Res2Net" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 85, + 301, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 85, + 301, + 240 + ], + "spans": [ + { + "bbox": [ + 45, + 85, + 301, + 240 + ], + "type": "text", + "content": "Res2Net [18] is a well-known architecture designed to extract multi-scale features. Unlike ResNet [19], Res2Net uses hierarchical residual connections within a single block, allowing it to capture patterns across varying receptive fields simultaneously [18]. This design offers proven advantages in speech-related tasks, such as SV [20]–[22] and anti-spoofing [23]–[25], where capturing subtle variations and complex acoustic patterns is important. As shown in Fig.
1, Res2Net (highlighted using a light red block) can also serve as a classifier within a speech foundation model-based anti-spoofing system. Its ability to extract multi-scale features has led to superior performance over conventional models and motivates the design of Nested Res2Net in this work." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 255, + 299, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 255, + 299, + 266 + ], + "spans": [ + { + "bbox": [ + 45, + 255, + 299, + 266 + ], + "type": "text", + "content": "B. Hand-crafted Feature-based Speech Anti-Spoofing Models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 270, + 301, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 270, + 301, + 426 + ], + "spans": [ + { + "bbox": [ + 45, + 270, + 301, + 426 + ], + "type": "text", + "content": "Hand-crafted acoustic features (such as MFCC) are common choices for many earlier speech anti-spoofing systems. These systems have evolved to effectively detect speech deepfakes [26], [27]. For instance, the Channel-wise Gated Res2Net (CG-Res2Net) [23] introduces a gating mechanism within the Res2Net architecture, enabling dynamic selection of channel-wise features to enhance generalization to unseen attacks. A widely recognized model is AASIST [26], which employs spectro-temporal graph attention layers to capture both temporal and spectral artifacts, thereby achieving efficient and accurate detection. Given AASIST's SOTA performance and its wide adoption in recent anti-spoofing challenges [16], [28], we consider it as our main baseline for evaluation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 441, + 174, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 441, + 174, + 452 + ], + "spans": [ + { + "bbox": [ + 45, + 441, + 174, + 452 + ], + "type": "text", + "content": "C. Speech Foundation Models" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 455, + 301, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 455, + 301, + 587 + ], + "spans": [ + { + "bbox": [ + 45, + 455, + 301, + 587 + ], + "type": "text", + "content": "Speech foundation models are often referred to as Self-Supervised Learning (SSL) models due to their typical pretraining on large amounts of unlabeled speech data using self-supervised learning techniques. Examples include wav2vec 2.0 [1], HuBERT [2], and WavLM [3]. Unlike hand-crafted acoustic features, which are limited in their ability to adapt to diverse and complex conditions, self-supervised learning (SSL) models learn rich and generalized speech representations that can be effectively adapted to various downstream applications. This allows them to achieve superior performance in speech-related tasks, including speech anti-spoofing." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 601, + 254, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 601, + 254, + 613 + ], + "spans": [ + { + "bbox": [ + 45, + 601, + 254, + 613 + ], + "type": "text", + "content": "D. 
Speech Foundation Model-based Anti-spoofing" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 616, + 300, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 616, + 300, + 723 + ], + "spans": [ + { + "bbox": [ + 45, + 616, + 300, + 723 + ], + "type": "text", + "content": "As discussed in the previous subsection, speech foundation models can capture more informative representations than handcrafted or raw acoustic features [3]. This makes them highly effective for speech anti-spoofing, as they generalize well across datasets and are more robust to unseen attacks [15]. As a result, many recent anti-spoofing systems increasingly adopt these models as front-ends, feeding their features to the back-end classifiers and consistently outperforming traditional models [16], [29], [30]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "type": "text", + "content": "To connect these powerful front-end models to downstream classifiers, a feature aggregation layer is introduced, as shown" + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 313, + 100, + 558, + 168 + ], + "blocks": [ + { + "bbox": [ + 316, + 57, + 556, + 94 + ], + "lines": [ + { + "bbox": [ + 316, + 57, + 556, + 94 + ], + "spans": [ + { + "bbox": [ + 316, + 57, + 556, + 94 + ], + "type": "text", + "content": "TABLE I CONTRIBUTION OF THE DR LAYER ON THE NUMBER OF PARAMETERS AND COMPUTATIONAL COST IN BACK-END MODELS. MMACS STANDS FOR MILLION MULTIPLY-ACCUMULATE OPERATIONS." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 313, + 100, + 558, + 168 + ], + "lines": [ + { + "bbox": [ + 313, + 100, + 558, + 168 + ], + "spans": [ + { + "bbox": [ + 313, + 100, + 558, + 168 + ], + "type": "table", + "html": "
<table><tr><td rowspan=2>Back-end Model</td><td colspan=3>Parameters</td><td colspan=3>MMACs</td></tr><tr><td>DR</td><td>Total</td><td>%</td><td>DR</td><td>Total</td><td>%</td></tr><tr><td>ResNet [19]</td><td>131k</td><td>611k</td><td>21%</td><td>26.24</td><td>70.62</td><td>37%</td></tr><tr><td>Res2Net [18]</td><td>131k</td><td>452k</td><td>29%</td><td>26.24</td><td>64.93</td><td>40%</td></tr><tr><td>ECAPA [34]</td><td>131k</td><td>497k</td><td>26%</td><td>26.24</td><td>80.21</td><td>33%</td></tr><tr><td>AASIST [26]</td><td>131k</td><td>447k</td><td>29%</td><td>26.24</td><td>707.65</td><td>4%</td></tr></table>
", + "image_path": "dabbcf97be8c75c0deb7beb748fdb20e0361404e5c527c4f5e1d38e00b935d08.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 181, + 563, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 181, + 563, + 229 + ], + "spans": [ + { + "bbox": [ + 307, + 181, + 563, + 229 + ], + "type": "text", + "content": "in Fig. 1. This layer combines features from different SSL layers using methods such as a simple weighted sum or attention-based methods like Squeeze-and-Excitation Aggregation (SEA) [16] and Attentive Merging (AttM) [31]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "spans": [ + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "text", + "content": "Following the aggregation layer, the resulting features are passed to the back-end classifier, as shown in the green box of Fig. 1. Existing methods typically use a DR layer, which reduces the high-dimensional features of " + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "text", + "content": " channels (commonly " + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "inline_equation", + "content": "N = 1024" + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "text", + "content": " [1], [3], [32]) to a lower dimension " + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "text", + "content": " (e.g., " + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "inline_equation", + "content": "D = 128" + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "text", + "content": " [15], [16] or " + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "inline_equation", + "content": "D = 144" + }, + { + "bbox": [ + 307, + 229, + 564, + 372 + ], + "type": "text", + "content": " [17], [33]) to match the classifier's input requirements. The classifier model then extracts features from the DR layer outputs and produces the final score. As illustrated in the red box of Fig. 1, commonly used classifier structures include traditional models such as ResNet [19], Res2Net [18], ECAPA-TDNN [34], and AASIST [26]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 372, + 564, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 372, + 564, + 503 + ], + "spans": [ + { + "bbox": [ + 307, + 372, + 564, + 503 + ], + "type": "text", + "content": "The strong performance of these systems stems from their ability to capture rich speech representations, enabling more accurate distinction between real and spoofed speech. As a result, these systems have achieved SOTA results [33], [35], [36], especially in recent challenges like ASVspoof 5 [28], [37], CtrSVDD [16], [38], [39], and ADD [40]. However, the use of a DR layer introduces challenges that limit the backend's ability to fully leverage the rich representations from speech foundation models. In this work, we aim to better unlock the potential of foundation models for speech antispoofing. These issues will be discussed in the next subsection." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 309, + 518, + 515, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 518, + 515, + 530 + ], + "spans": [ + { + "bbox": [ + 309, + 518, + 515, + 530 + ], + "type": "text", + "content": "E. Limitation of Dimensionality Reduction Layer" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 533, + 563, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 533, + 563, + 628 + ], + "spans": [ + { + "bbox": [ + 307, + 533, + 563, + 628 + ], + "type": "text", + "content": "Existing speech foundation model-based anti-spoofing systems excel in extracting rich, high-dimensional feature representations, which capture intricate patterns in speech. However, this high dimensionality poses a significant challenge for downstream tasks. Models in these tasks typically require lower-dimensional features [23], [26], [27], creating a mismatch between the output features of the foundation models and the requirements of downstream processing." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 629, + 564, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 629, + 564, + 724 + ], + "spans": [ + { + "bbox": [ + 307, + 629, + 564, + 724 + ], + "type": "text", + "content": "A commonly used approach for dimensionality reduction is to employ a DR layer. However, this approach has several issues, including parameter overhead and potential information loss. As shown in Table I, our analysis of back-end models further emphasizes the inefficiency of this approach. We consider commonly used feature dimensions of " + }, + { + "bbox": [ + 307, + 629, + 564, + 724 + ], + "type": "inline_equation", + "content": "N = 1024" + }, + { + "bbox": [ + 307, + 629, + 564, + 724 + ], + "type": "text", + "content": " from large models [1], [3], and a reduced dimension of " + }, + { + "bbox": [ + 307, + 629, + 564, + 724 + ], + "type": "inline_equation", + "content": "D = 128" + }, + { + "bbox": [ + 307, + 629, + 564, + 724 + ], + "type": "text", + "content": ", widely adopted in SOTA back-end models [15], [16], [31]." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 724, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 724, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 308, + 724, + 564, + 750 + ], + "type": "text", + "content": "Across various back-end models, the DR layer, despite being just a single layer, consistently accounts for a substantial" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 56, + 562, + 259 + ], + "blocks": [ + { + "bbox": [ + 48, + 56, + 562, + 259 + ], + "lines": [ + { + "bbox": [ + 48, + 56, + 562, + 259 + ], + "spans": [ + { + "bbox": [ + 48, + 56, + 562, + 259 + ], + "type": "image", + "image_path": "60a1d06954731a1d8497f7ad39edfbb82ed60f30aa0d12cd56c49475a8a4119e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 266, + 566, + 314 + ], + "lines": [ + { + "bbox": [ + 45, + 266, + 566, + 314 + ], + "spans": [ + { + "bbox": [ + 45, + 266, + 566, + 314 + ], + "type": "text", + "content": "Fig. 1. The block diagram of the speech foundation model-based speech anti-spoofing system, showcasing both the traditional back-end models and the proposed Nes2Net back-end. The traditional back-end models include a DR layer and a classifier, such as ResNet [19], Res2Net [18], ECAPA-TDNN [34], and AASIST [26]. In contrast, the proposed Nes2Net back-end model features a DR layer-free design. Additionally, an enhanced version of its nested layer, named Nes2Net-X, is introduced to further improve performance. Abbreviations used in the figure include: 'FC' (fully connected layer), 'Conv' (convolutional layer), 'WS' (weighted sum), 'SE' (squeeze-and-excitation module) [41], and 'Att. Stat. Pool.' (attentive statistics pooling) [42]." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 326, + 301, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 326, + 301, + 432 + ], + "spans": [ + { + "bbox": [ + 45, + 326, + 301, + 432 + ], + "type": "text", + "content": "share of parameters and computational cost, underscoring its resource-intensive nature. For instance, the DR layer accounts for " + }, + { + "bbox": [ + 45, + 326, + 301, + 432 + ], + "type": "inline_equation", + "content": "21\\%" + }, + { + "bbox": [ + 45, + 326, + 301, + 432 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 45, + 326, + 301, + 432 + ], + "type": "inline_equation", + "content": "29\\%" + }, + { + "bbox": [ + 45, + 326, + 301, + 432 + ], + "type": "text", + "content": " of the parameters across ResNet, Res2Net, ECAPA, and AASIST. In terms of computational cost, the DR layer generally contributes at least one-third of the total MACs. 
AASIST is the only exception, where the DR layer accounts for just " + }, + { + "bbox": [ + 45, + 326, + 301, + 432 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 45, + 326, + 301, + 432 + ], + "type": "text", + "content": " of the MACs, primarily because its overall MAC count is an order of magnitude higher than that of other models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 434, + 301, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 434, + 301, + 494 + ], + "spans": [ + { + "bbox": [ + 45, + 434, + 301, + 494 + ], + "type": "text", + "content": "This table highlights that a single DR layer significantly inflates the back-end model's size and resource demands. Furthermore, its direct projection design discards important high-dimensional features, limiting the overall potential of speech foundation models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 127, + 510, + 220, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 510, + 220, + 521 + ], + "spans": [ + { + "bbox": [ + 127, + 510, + 220, + 521 + ], + "type": "text", + "content": "III. METHODOLOGY" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 529, + 212, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 529, + 212, + 540 + ], + "spans": [ + { + "bbox": [ + 45, + 529, + 212, + 540 + ], + "type": "text", + "content": "A. Proposed Nested Res2Net (Nes2Net)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 544, + 300, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 544, + 300, + 628 + ], + "spans": [ + { + "bbox": [ + 45, + 544, + 300, + 628 + ], + "type": "text", + "content": "The design of Nes2Net is driven by two primary objectives: 1) effectively and efficiently utilizing the high-dimensional features from speech foundation models, and 2) enhancing multi-scale feature extraction to achieve robust generalization in speech anti-spoofing tasks. These objectives are realized through a novel nested architecture that simultaneously improves the efficiency, flexibility, and robustness of the model." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 628, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 628, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 628, + 301, + 749 + ], + "type": "text", + "content": "Efficiency and Retention of Rich Feature Information: The analysis in Section II-E reveals the limitations of employing the DR layer. Building upon the observations, Nes2Net entirely removes the DR layer, directly processing high-dimensional features to retain their intrinsic richness and minimize unnecessary computational costs. By bypassing the DR layer, Nes2Net prevents the information bottleneck typically caused by early dimensionality reduction. This ensures the preservation of detailed representations essential for accurately distinguishing genuine speech from spoofed audio." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 326, + 564, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 326, + 564, + 469 + ], + "spans": [ + { + "bbox": [ + 307, + 326, + 564, + 469 + ], + "type": "text", + "content": "Enhanced Multi-Scale Feature Interaction and Expressiveness: While the Res2Net architecture effectively extracts multi-scale features through hierarchical splits, it exhibits significant limitations when processing high-dimensional features directly, especially with large split scales " + }, + { + "bbox": [ + 307, + 326, + 564, + 469 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 307, + 326, + 564, + 469 + ], + "type": "text", + "content": ". Specifically, Res2Net suffers from feature dilution [18], redundant transformations [43], and restricted interactions among channels. Excessive splitting fragments the features, weakening their expressiveness, and repetitive transformations increase computational redundancy, potentially causing overfitting. Moreover, closely related information can be distributed across non-adjacent subsets, limiting effective cross-channel interactions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 472, + 564, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 472, + 564, + 674 + ], + "spans": [ + { + "bbox": [ + 307, + 472, + 564, + 674 + ], + "type": "text", + "content": "To overcome these limitations, as illustrated in Fig. 1, we propose a novel Nested Res2Net (Nes2Net) architecture that introduces a hierarchical nesting structure. This additional degree of flexibility significantly enhances the model's representational capability. Each nested layer progressively refines features by building upon outputs from preceding layers and also incorporates efficient local cross-channel attention mechanisms [44], [45], strengthening interactions across channels. This holistic feature extraction approach enables Nes2Net to comprehensively capture intricate speech patterns. Moreover, the cumulative refinement effectively mitigates the issue of feature dilution, preserving rich and expressive multi-scale information. Benefiting from the structural advantages of the nesting strategy, the need for excessive fine-grained splits is reduced, effectively mitigating redundant transformations. This approach also minimizes unnecessary computations, resulting in a compact yet highly expressive model." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 677, + 565, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 677, + 565, + 749 + ], + "spans": [ + { + "bbox": [ + 307, + 677, + 565, + 749 + ], + "type": "text", + "content": "Critically, overfitting is a well-known challenge in speech anti-spoofing tasks, often leading to degraded performance in cross-domain scenarios. Previous studies [23], [26], particularly with compact models like AASIST and Res2Net (both with fewer than 500k parameters), have shown that smaller models can help reduce overfitting. 
Our experiments with these" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "text", + "content": "models confirm that simply increasing their size does not always lead to better performance and can, in fact, make overfitting worse. As a result, improving feature quality through smarter model structure design becomes more important than just scaling up the model. The nested architecture of Nes2Net provides clear benefits as it maintains computational efficiency while reducing the risk of overfitting." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 138, + 301, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 138, + 301, + 162 + ], + "spans": [ + { + "bbox": [ + 45, + 138, + 301, + 162 + ], + "type": "text", + "content": "The Nes2Net consists of an outer layer and several identical nested layers, described as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "spans": [ + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": "1) Outer Layer: The outer layer of Nes2Net adopts a structure similar to that of Res2Net. The high-dimensional features produced by a speech foundation model are uniformly split into " + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "inline_equation", + "content": "s_1" + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": " feature map subsets, denoted by " + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "inline_equation", + "content": "i \\in \\{1, 2, \\dots, s_1\\}" + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": ". Each feature subset " + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": " has the same spatial size but contains only " + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "inline_equation", + "content": "\\frac{1}{s_1}" + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": " of the channels of the input feature map. 
With the exception of " + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "inline_equation", + "content": "x_1" + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": ", each " + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": " is paired with a corresponding nested layer, denoted by " + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_i(\\cdot)" + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": ". The output of " + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_i(\\cdot)" + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": ", represented as " + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 45, + 163, + 301, + 270 + ], + "type": "text", + "content": ", is computed as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 280, + 301, + 324 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 280, + 301, + 324 + ], + "spans": [ + { + "bbox": [ + 96, + 280, + 301, + 324 + ], + "type": "interline_equation", + "content": "y _ {i} = \\left\\{ \\begin{array}{l l} x _ {i} & i = 1; \\\\ \\mathbf {K} _ {i} \\left(x _ {i}\\right) & i = 2; \\\\ \\mathbf {K} _ {i} \\left(x _ {i} + y _ {i - 1}\\right) & 2 < i \\leq s _ {1}. \\end{array} \\right. \\tag {1}", + "image_path": "42c4d557aa680975a756269def714eecc57dc1e96b22095e36c26eabbddd4813.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "spans": [ + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "type": "text", + "content": " is first added to the output of " + }, + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{i - 1}(\\cdot)" + }, + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "type": "text", + "content": ", and the resulting feature map is then fed into " + }, + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_i(\\cdot)" + }, + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "type": "text", + "content": " for further processing. All " + }, + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 45, + 330, + 301, + 437 + ], + "type": "text", + "content": " features are concatenated along the channel dimension. Due to the combinatorial explosion effect [18], the output features encapsulate a fusion of receptive field characteristics across different scales and frame levels. These features are then pooled along the time axis to convert frame-level features into utterance-level representations, which are subsequently used to compute the final classification score." 
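As a minimal sketch of Eqs. 1-2, the outer layer below splits the channel dimension into s_1 subsets and chains the nested layers; the class name, the plain 3-tap convolutions standing in for the nested layers K_i, and the mean pooling standing in for the actual pooling layer are all illustrative assumptions, not the released implementation.

```python
# A minimal sketch of the outer layer (Eqs. 1-2), assuming (batch, C, T)
# front-end features; K_i are stubbed with plain 3-tap convolutions.
import torch
import torch.nn as nn

class OuterLayerSketch(nn.Module):
    def __init__(self, channels: int = 1024, s1: int = 8):
        super().__init__()
        assert channels % s1 == 0
        width = channels // s1
        self.s1 = s1
        # One nested layer per subset except x_1, i.e. K_2 ... K_{s1}.
        self.K = nn.ModuleList(
            nn.Conv1d(width, width, kernel_size=3, padding=1)
            for _ in range(s1 - 1)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        xs = torch.chunk(x, self.s1, dim=1)   # uniform split into s1 subsets
        ys = [xs[0]]                          # y_1 = x_1
        for i in range(1, self.s1):
            inp = xs[i] if i == 1 else xs[i] + ys[-1]   # x'_i per Eq. 2
            ys.append(self.K[i - 1](inp))     # y_i = K_i(x'_i)
        out = torch.cat(ys, dim=1)            # concat along channels
        return out.mean(dim=-1)               # pool time -> utterance level

feats = torch.randn(2, 1024, 200)             # assumed (batch, C, T)
print(OuterLayerSketch()(feats).shape)        # torch.Size([2, 1024])
```

Note that no 1x1 convolutions appear before the split or after the concatenation, consistent with their removal described below.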
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 438, + 301, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 438, + 301, + 498 + ], + "spans": [ + { + "bbox": [ + 45, + 438, + 301, + 498 + ], + "type": "text", + "content": "It is worth noting that since the outer layer directly processes high-dimensional features from the speech foundation model, the original two convolutional layers (kernel size of 1) used before splitting and after concatenation in Res2Net are removed to improve efficiency." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 498, + 301, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 498, + 301, + 546 + ], + "spans": [ + { + "bbox": [ + 45, + 498, + 301, + 546 + ], + "type": "text", + "content": "2) Nested Layer: The nested layer acts as the core module responsible for processing the outer layer's intermediate features, denoted by " + }, + { + "bbox": [ + 45, + 498, + 301, + 546 + ], + "type": "inline_equation", + "content": "x_{i}^{\\prime}" + }, + { + "bbox": [ + 45, + 498, + 301, + 546 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 45, + 498, + 301, + 546 + ], + "type": "inline_equation", + "content": "i \\in \\{2, \\ldots, s_1\\}" + }, + { + "bbox": [ + 45, + 498, + 301, + 546 + ], + "type": "text", + "content": ". Based on Eq. 1, " + }, + { + "bbox": [ + 45, + 498, + 301, + 546 + ], + "type": "inline_equation", + "content": "x_{i}^{\\prime}" + }, + { + "bbox": [ + 45, + 498, + 301, + 546 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 555, + 299, + 587 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 299, + 587 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 299, + 587 + ], + "type": "interline_equation", + "content": "x _ {i} ^ {\\prime} = \\left\\{ \\begin{array}{l l} x _ {i} & i = 2; \\\\ x _ {i} + y _ {i - 1} & 2 < i \\leq s _ {1}. \\end{array} \\right. \\tag {2}", + "image_path": "7e69571a4ff0ffaa47a0d2c05ca2240776de844853e960ce84ab1186458a45c6.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 592, + 301, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 592, + 301, + 664 + ], + "spans": [ + { + "bbox": [ + 45, + 592, + 301, + 664 + ], + "type": "text", + "content": "Each nested layer " + }, + { + "bbox": [ + 45, + 592, + 301, + 664 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_i(\\cdot)" + }, + { + "bbox": [ + 45, + 592, + 301, + 664 + ], + "type": "text", + "content": " is designed to extract multi-scale representations from its input while maintaining computational efficiency. As shown in Fig. 1, the structure of " + }, + { + "bbox": [ + 45, + 592, + 301, + 664 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_i(\\cdot)" + }, + { + "bbox": [ + 45, + 592, + 301, + 664 + ], + "type": "text", + "content": " follows a SE-Res2Net-like design, but its input is the feature subset " + }, + { + "bbox": [ + 45, + 592, + 301, + 664 + ], + "type": "inline_equation", + "content": "x_i'" + }, + { + "bbox": [ + 45, + 592, + 301, + 664 + ], + "type": "text", + "content": " from the outer layer of Nes2Net. 
Specifically, each nested layer consists of the following components:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 664, + 301, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 664, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 45, + 664, + 301, + 700 + ], + "type": "text", + "content": "Convolutional Layers: The input feature map is first processed by a convolutional layer with a kernel size of 1 to extract local features while preserving the spatial dimensions." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "content": "Multi-Scale Feature Extraction: To enable multi-scale processing, the input feature map " + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "inline_equation", + "content": "x_{i}^{\\prime}" + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "content": " is equally split into " + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "inline_equation", + "content": "s_2" + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "content": " subsets along the channel dimension, denoted by " + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "inline_equation", + "content": "x_{i,j}^{\\prime}" + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "inline_equation", + "content": "j \\in \\{1, 2, \\ldots, s_2\\}" + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "content": ". Each subset undergoes separate" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 55, + 563, + 79 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 55, + 563, + 79 + ], + "spans": [ + { + "bbox": [ + 308, + 55, + 563, + 79 + ], + "type": "text", + "content": "transformations through convolutional operations " + }, + { + "bbox": [ + 308, + 55, + 563, + 79 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_j" + }, + { + "bbox": [ + 308, + 55, + 563, + 79 + ], + "type": "text", + "content": " with varying receptive fields, yielding " + }, + { + "bbox": [ + 308, + 55, + 563, + 79 + ], + "type": "inline_equation", + "content": "y_{i,j}" + }, + { + "bbox": [ + 308, + 55, + 563, + 79 + ], + "type": "text", + "content": ", formulated as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 347, + 83, + 563, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 83, + 563, + 114 + ], + "spans": [ + { + "bbox": [ + 347, + 83, + 563, + 114 + ], + "type": "interline_equation", + "content": "y _ {i, j} = \\left\\{ \\begin{array}{l l} x _ {i, j} ^ {\\prime} & j = 1; \\\\ \\mathbf {M} _ {j} \\left(x _ {i, j} ^ {\\prime} + y _ {i, j - 1}\\right) & 1 < j \\leq s _ {2}. \\end{array} \\right. 
\\tag {3}", + "image_path": "5e9d3dac3968c2aca3602c7becebdf97db62a94ae78c7df5ff863f2e6c45e0e0.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 118, + 563, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 118, + 563, + 141 + ], + "spans": [ + { + "bbox": [ + 308, + 118, + 563, + 141 + ], + "type": "text", + "content": "These transformed subsets are then concatenated to form the output " + }, + { + "bbox": [ + 308, + 118, + 563, + 141 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 308, + 118, + 563, + 141 + ], + "type": "text", + "content": " of the nested layer." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 141, + 563, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 141, + 563, + 201 + ], + "spans": [ + { + "bbox": [ + 308, + 141, + 563, + 201 + ], + "type": "text", + "content": "SE Module: To further enhance the feature representations, a Squeeze-and-Excitation (SE) module is integrated into each nested layer. The SE module adaptively recalibrates channelwise features to emphasize informative features and suppress less relevant ones [41]." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 201, + 563, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 201, + 563, + 248 + ], + "spans": [ + { + "bbox": [ + 308, + 201, + 563, + 248 + ], + "type": "text", + "content": "Residual Connections: To enhance gradient flow and stabilize training, a residual connection is applied by adding the input of " + }, + { + "bbox": [ + 308, + 201, + 563, + 248 + ], + "type": "inline_equation", + "content": "x_{i}^{\\prime}" + }, + { + "bbox": [ + 308, + 201, + 563, + 248 + ], + "type": "text", + "content": " to its output " + }, + { + "bbox": [ + 308, + 201, + 563, + 248 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 308, + 201, + 563, + 248 + ], + "type": "text", + "content": ". This design preserves the original information while incorporating newly learned features." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 248, + 563, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 248, + 563, + 283 + ], + "spans": [ + { + "bbox": [ + 308, + 248, + 563, + 283 + ], + "type": "text", + "content": "In summary, the nested layer is lightweight, highly efficient, and designed to improve robustness and generalization across diverse conditions." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 296, + 487, + 307 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 296, + 487, + 307 + ], + "spans": [ + { + "bbox": [ + 309, + 296, + 487, + 307 + ], + "type": "text", + "content": "B. Enhanced Nested Res2Net (Nes2Net-X)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 310, + 564, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 310, + 564, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 310, + 564, + 453 + ], + "type": "text", + "content": "Nes2Net efficiently addresses the high-dimensional feature issue. However, it relies on an additive combination method within the nested layer, which may limit the flexibility and effectiveness of feature extraction, as it implicitly assigns equal importance to all features. To further enhance the representational capacity of Nes2Net, we propose an improved variant named Nes2Net-X. 
It replaces the original addition operation in the nested layer with a concatenation followed by a learnable weighted summation. This design explicitly preserves feature subset individuality before fusion and employs learnable weights to adaptively combine these subsets. The Nes2Net-X consists of the following components:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "spans": [ + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "text", + "content": "Feature Splitting and Processing: This component is the same as that in Nes2Net nested layer. The input feature " + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "inline_equation", + "content": "x_{i}^{\\prime}" + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "text", + "content": " is equally split into " + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "inline_equation", + "content": "s_2" + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "text", + "content": " subsets along the channel dimension, denoted by " + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "inline_equation", + "content": "x_{i,j}^{\\prime}" + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "inline_equation", + "content": "j \\in \\{1, 2, \\dots, s_2\\}" + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "text", + "content": ". Each subset " + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "inline_equation", + "content": "x_{i,j}^{\\prime}" + }, + { + "bbox": [ + 308, + 454, + 564, + 525 + ], + "type": "text", + "content": " undergoes a convolutional operation to extract feature representations." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 525, + 564, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 525, + 564, + 585 + ], + "spans": [ + { + "bbox": [ + 308, + 525, + 564, + 585 + ], + "type": "text", + "content": "Feature Concatenation: The outputs of the convolutional layers are denoted as " + }, + { + "bbox": [ + 308, + 525, + 564, + 585 + ], + "type": "inline_equation", + "content": "z_{i,j}" + }, + { + "bbox": [ + 308, + 525, + 564, + 585 + ], + "type": "text", + "content": ". In Nes2Net-X, instead of summing the processed features as in the Nes2Net, each current subset " + }, + { + "bbox": [ + 308, + 525, + 564, + 585 + ], + "type": "inline_equation", + "content": "x_{i,j}^{\\prime}" + }, + { + "bbox": [ + 308, + 525, + 564, + 585 + ], + "type": "text", + "content": " is concatenated with the previous output " + }, + { + "bbox": [ + 308, + 525, + 564, + 585 + ], + "type": "inline_equation", + "content": "z_{i,j-1}" + }, + { + "bbox": [ + 308, + 525, + 564, + 585 + ], + "type": "text", + "content": " along a newly introduced dimension before being processed." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 585, + 564, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 585, + 564, + 692 + ], + "spans": [ + { + "bbox": [ + 307, + 585, + 564, + 692 + ], + "type": "text", + "content": "Weighted Sum: The additional dimension created during concatenation is merged back into the original feature space using a 'weighted sum' operation. This operation enables the model to dynamically assign importance to each subset, enhancing feature representation. 
For each subset, the 'weighted sum' is applied to the output feature " + }, + { + "bbox": [ + 307, + 585, + 564, + 692 + ], + "type": "inline_equation", + "content": "z_{i,j}" + }, + { + "bbox": [ + 307, + 585, + 564, + 692 + ], + "type": "text", + "content": " of the convolutional layer. Let " + }, + { + "bbox": [ + 307, + 585, + 564, + 692 + ], + "type": "inline_equation", + "content": "w_{i,j}" + }, + { + "bbox": [ + 307, + 585, + 564, + 692 + ], + "type": "text", + "content": " denote the learnable weights assigned to each concatenated feature. The output " + }, + { + "bbox": [ + 307, + 585, + 564, + 692 + ], + "type": "inline_equation", + "content": "y_{i,j}" + }, + { + "bbox": [ + 307, + 585, + 564, + 692 + ], + "type": "text", + "content": " of the 'weighted sum' is computed as:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 387, + 691, + 563, + 721 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 387, + 691, + 563, + 721 + ], + "spans": [ + { + "bbox": [ + 387, + 691, + 563, + 721 + ], + "type": "interline_equation", + "content": "y _ {i, j} = \\sum_ {k = 1} ^ {s} w _ {i, j, k} \\cdot z _ {i, j, k} \\tag {4}", + "image_path": "cdb077715738a859647716de99f3da764ab98bc0bbef54fc6a8d8d889b7cb8ce.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "type": "text", + "content": " denotes the number of subsets, " + }, + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "type": "inline_equation", + "content": "w_{i,j,k}" + }, + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "type": "text", + "content": " represents the weight for the " + }, + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "type": "text", + "content": "-th subset features " + }, + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "type": "inline_equation", + "content": "z_{i,j,k}" + }, + { + "bbox": [ + 309, + 724, + 564, + 750 + ], + "type": "text", + "content": "." 
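A sketch of the Nes2Net-X fusion (Eqs. 3-4) under one plausible reading of the text: each step stacks the current subset with the previous output along a new dimension and merges it back with learnable weights. The softmax normalization, the ordering of the weighted sum relative to M_j, and the stub 3-tap convolutions are all assumptions made to keep the sketch runnable.

```python
# A hedged sketch of the Nes2Net-X nested layer with the learnable
# weighted sum (Eq. 4); stacking/ordering details are assumptions.
import torch
import torch.nn as nn

class WeightedSumFuse(nn.Module):
    """Merge the stacked dimension back with learnable per-entry weights."""
    def __init__(self, n_entries: int = 2):
        super().__init__()
        self.w = nn.Parameter(torch.ones(n_entries) / n_entries)

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        # z: (batch, n_entries, C, T) -> weighted sum over the stacked dim
        w = torch.softmax(self.w, dim=0)       # assumed normalization
        return torch.einsum("k,bkct->bct", w, z)

class NestedLayerXSketch(nn.Module):
    def __init__(self, channels: int = 128, s2: int = 8):
        super().__init__()
        assert channels % s2 == 0
        width = channels // s2
        self.s2 = s2
        self.M = nn.ModuleList(
            nn.Conv1d(width, width, 3, padding=1) for _ in range(s2 - 1))
        self.fuse = nn.ModuleList(WeightedSumFuse(2) for _ in range(s2 - 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        xs = torch.chunk(x, self.s2, dim=1)
        ys = [xs[0]]                                  # y_{i,1} = x'_{i,1}
        for j in range(1, self.s2):
            z = torch.stack([xs[j], ys[-1]], dim=1)   # new stacking dimension
            ys.append(self.M[j - 1](self.fuse[j - 1](z)))
        return torch.cat(ys, dim=1) + x               # residual connection

print(NestedLayerXSketch()(torch.randn(2, 128, 200)).shape)  # (2, 128, 200)
```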
+ } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 79 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 79 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 79 + ], + "type": "text", + "content": "The weighted summation provides more flexible and effective feature integration, offering several advantages:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 80, + 301, + 236 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 55, + 80, + 301, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 80, + 301, + 116 + ], + "spans": [ + { + "bbox": [ + 55, + 80, + 301, + 116 + ], + "type": "text", + "content": "- Enhanced Feature Diversity: By concatenating features across subsets, the network captures a richer set of features, encompassing various aspects of the input data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 116, + 301, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 116, + 301, + 175 + ], + "spans": [ + { + "bbox": [ + 56, + 116, + 301, + 175 + ], + "type": "text", + "content": "- Learnable Feature Fusion: The introduction of learnable weights " + }, + { + "bbox": [ + 56, + 116, + 301, + 175 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 56, + 116, + 301, + 175 + ], + "type": "text", + "content": " enables the model to prioritize more informative features, effectively suppressing less relevant ones. This adaptive mechanism allows the network to focus on the most discriminative features for the task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 176, + 301, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 176, + 301, + 236 + ], + "spans": [ + { + "bbox": [ + 56, + 176, + 301, + 236 + ], + "type": "text", + "content": "- Improved Gradient Flow: By combining concatenation with weighted summation, the model facilitates better gradient propagation during training. This helps address potential issues such as vanishing or exploding gradients, leading to more stable and efficient learning." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 45, + 237, + 301, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 237, + 301, + 274 + ], + "spans": [ + { + "bbox": [ + 45, + 237, + 301, + 274 + ], + "type": "text", + "content": "These modifications enable Nes2Net-X to retain the strengths of Nes2Net while introducing greater flexibility in feature fusion, ultimately improving performance." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 284, + 236, + 294 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 284, + 236, + 294 + ], + "spans": [ + { + "bbox": [ + 111, + 284, + 236, + 294 + ], + "type": "text", + "content": "IV. EXPERIMENTAL SETUPS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 299, + 98, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 299, + 98, + 309 + ], + "spans": [ + { + "bbox": [ + 45, + 299, + 98, + 309 + ], + "type": "text", + "content": "A. Datasets" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 46, + 350, + 299, + 450 + ], + "blocks": [ + { + "bbox": [ + 67, + 324, + 280, + 343 + ], + "lines": [ + { + "bbox": [ + 67, + 324, + 280, + 343 + ], + "spans": [ + { + "bbox": [ + 67, + 324, + 280, + 343 + ], + "type": "text", + "content": "TABLE II A SUMMARY OF THE DATASETS USED IN OUR EXPERIMENTS." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 46, + 350, + 299, + 450 + ], + "lines": [ + { + "bbox": [ + 46, + 350, + 299, + 450 + ], + "spans": [ + { + "bbox": [ + 46, + 350, + 299, + 450 + ], + "type": "table", + "html": "
Dataset | Spoofing Type | Train Samples | Valid Samples | Test Samples
CtrSVDD w/o ACEsinger bona fide [46] | Singing Voice | 84,404 | 43,625 | 64,734
CtrSVDD w/ ACEsinger bona fide [46] | | | | 67,579
ASVspoof 2019 [47] | | 25,380 | 24,844 | -
ASVspoof 2021 LA [48] | | - | - | 181,566
ASVspoof 2021 DF [48] | Speech | - | - | 611,829
ASVspoof 5 [49] | | 182,357 | 140,950 | 680,774
In-the-Wild [50] | | - | - | 31,779
PartialSpoof [51] | Partial Spoof | 25,380 | 24,844 | 71,237
", + "image_path": "98b5b166adf5a4085d47308265f5b1d43548aef5667c9a311dea779157979ccf.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 461, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 461, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 461, + 301, + 750 + ], + "type": "text", + "content": "We use five datasets across various scenarios, including singing voice deepfake, fully spoofed speech, adversarial attacks, and partially spoofed speech, to evaluate the performance of the proposed model. Singing voice deepfake detection (SVDD) is a growing area of interest in the research community [46], [52], [53]. The CtrlSVDD dataset [46], [52] offers structured attack types and official evaluation protocols, making it suitable for systematic architecture exploration. As a newly collected resource, it captures recent spoofing techniques, providing a more challenging and relevant benchmark for modern anti-spoofing systems. We therefore adopt it as a representative example. Moreover, fully spoofed speech is the most studied category. In this work, we include two categories of datasets: (1) the ASVspoof series, which comprises ASVspoof 2019 [47], ASVspoof 2021 Logical Access (LA), ASVspoof 2021 Deepfake (DF) [48], and ASVspoof 5 [49]; and (2) the In-the-Wild dataset [50], which reflects real-world usage scenarios. Partially spoofed speech alters only part of an utterance to convey deceptive meaning. This emerging challenge has attracted growing attention. We use the PartialSpoof [51] dataset as a representative benchmark. Table II summarizes the datasets used in this study. Models are trained on the training set and validated on the validation set to select the best checkpoint for testing." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 55, + 564, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 55, + 564, + 187 + ], + "spans": [ + { + "bbox": [ + 307, + 55, + 564, + 187 + ], + "type": "text", + "content": "For CtrlSVDD [46], we report results on two official test protocols, according to whether ACESinger bona fide samples are included. The 'A14' attack type of the CtrlSVDD dataset is excluded following the official guidelines [46]. ASVspoof 2019 [47] is used only for training and validation, while the In-the-Wild [50], ASVspoof 2021 LA and DF [48] datasets are used only for testing. For the recently released ASVspoof 5 dataset [49], we use its train, development, and evaluation partitions for model training, validation, and testing, respectively. For PartialSpoof [51], we follow the standard partitioning into train, development, and evaluation sets." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 201, + 404, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 201, + 404, + 213 + ], + "spans": [ + { + "bbox": [ + 309, + 201, + 404, + 213 + ], + "type": "text", + "content": "B. Training Strategies" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 215, + 564, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 215, + 564, + 299 + ], + "spans": [ + { + "bbox": [ + 307, + 215, + 564, + 299 + ], + "type": "text", + "content": "Each experiment is run three times using different random seeds. We report both the result from the best-performing run and the average performance across all runs. 
The values of " + }, + { + "bbox": [ + 307, + 215, + 564, + 299 + ], + "type": "inline_equation", + "content": "s_1" + }, + { + "bbox": [ + 307, + 215, + 564, + 299 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 215, + 564, + 299 + ], + "type": "inline_equation", + "content": "s_2" + }, + { + "bbox": [ + 307, + 215, + 564, + 299 + ], + "type": "text", + "content": " are both set to 8 for Nes2Net and Nes2Net-X. The baseline systems for each dataset are built using SOTA models, and our proposed model adopts similar training strategies. The details are as follows:" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 313, + 309, + 558, + 373 + ], + "blocks": [ + { + "bbox": [ + 313, + 309, + 558, + 373 + ], + "lines": [ + { + "bbox": [ + 313, + 309, + 558, + 373 + ], + "spans": [ + { + "bbox": [ + 313, + 309, + 558, + 373 + ], + "type": "image", + "image_path": "efe1584a963bd86fedb0e112a862eb4e48cd69fc72a15bbe69361f939014ea25.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 376, + 526, + 387 + ], + "lines": [ + { + "bbox": [ + 309, + 376, + 526, + 387 + ], + "spans": [ + { + "bbox": [ + 309, + 376, + 526, + 387 + ], + "type": "text", + "content": "Fig. 2. The cyclic learning rate schedule using cosine annealing." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "spans": [ + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "type": "text", + "content": "CtrSVDD: For the models trained on the CtrSVDD dataset [46], [52], we follow the baseline system from " + }, + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "type": "inline_equation", + "content": "[16]^1" + }, + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "type": "text", + "content": ". Following the setting in [16], we use a random seed of 42 to ensure reproducibility. Furthermore, due to the inherent stochasticity in deep learning, repeated runs are necessary to obtain reliable average results. We use the AdamW optimizer with batch size 34, an initial learning rate of " + }, + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "type": "inline_equation", + "content": "1 \times 10^{-6}" + }, + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "type": "text", + "content": ", and weight decay of " + }, + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "type": "inline_equation", + "content": "1 \times 10^{-4}" + }, + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "type": "text", + "content": ". The learning rate is scheduled with cyclic cosine annealing down to a minimum of " + }, + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "type": "inline_equation", + "content": "1 \times 10^{-9}" + }, + { + "bbox": [ + 307, + 398, + 564, + 506 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 506, + 564, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 506, + 564, + 648 + ], + "spans": [ + { + "bbox": [ + 307, + 506, + 564, + 648 + ], + "type": "text", + "content": "As shown in Fig. 2, over 75 training epochs, we select checkpoints from the epoch with the minimum learning rate, as well as its preceding and following epochs, for validation. The best validation result is then used for testing.
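A minimal sketch of this cyclic schedule using PyTorch's CosineAnnealingWarmRestarts follows; the cycle length T_0 = 25 is an assumption (the text fixes only the 75-epoch budget and the 1e-6 to 1e-9 learning-rate range), and the Linear layer is a stand-in for the actual back-end.

```python
# A hedged sketch of the cyclic cosine-annealing schedule of Fig. 2;
# T_0 = 25 is assumed, as only the 75-epoch budget and LR range are given.
import torch
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

model = torch.nn.Linear(1024, 2)   # stand-in for the actual back-end
opt = AdamW(model.parameters(), lr=1e-6, weight_decay=1e-4)
sched = CosineAnnealingWarmRestarts(opt, T_0=25, eta_min=1e-9)

for epoch in range(75):
    # ... one training epoch with batch size 34, then validation ...
    sched.step()  # checkpoints near each minimum-LR epoch are candidates
```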
We use binary focal loss [54], a generalization of binary cross-entropy loss, with a focusing parameter " + }, + { + "bbox": [ + 307, + 506, + 564, + 648 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 307, + 506, + 564, + 648 + ], + "type": "text", + "content": " of 2 and a positive class weight " + }, + { + "bbox": [ + 307, + 506, + 564, + 648 + ], + "type": "inline_equation", + "content": "(\\alpha)" + }, + { + "bbox": [ + 307, + 506, + 564, + 648 + ], + "type": "text", + "content": " of 0.25. To standardize input length, each sample is randomly cropped or padded to 4 seconds during training. We adopt the Rawboost 'parallel: " + }, + { + "bbox": [ + 307, + 506, + 564, + 648 + ], + "type": "inline_equation", + "content": "(1)+(2)" + }, + { + "bbox": [ + 307, + 506, + 564, + 648 + ], + "type": "text", + "content": "' data augmentation strategy [55], as explored in [16]. WavLM is used as the frontend model for this dataset. The pre-trained and implementation of WavLM are obtained from S3PRL2." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 649, + 564, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 649, + 564, + 710 + ], + "spans": [ + { + "bbox": [ + 308, + 649, + 564, + 710 + ], + "type": "text", + "content": "ASVspoof 2019 & 2021: For the models trained on the ASVspoof 2019 [47] dataset, we follow the baseline system proposed in " + }, + { + "bbox": [ + 308, + 649, + 564, + 710 + ], + "type": "inline_equation", + "content": "[15]^3" + }, + { + "bbox": [ + 308, + 649, + 564, + 710 + ], + "type": "text", + "content": ". Audio data are cropped or concatenated to create segments of approximately 4 seconds in duration (64,600 samples) for both training and testing. We use the" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 717, + 487, + 748 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 317, + 717, + 461, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 717, + 461, + 727 + ], + "spans": [ + { + "bbox": [ + 317, + 717, + 461, + 727 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 317, + 717, + 461, + 727 + ], + "type": "text", + "content": "https://github.com/Anmol2059/SVDD2024" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 727, + 418, + 738 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 727, + 418, + 738 + ], + "spans": [ + { + "bbox": [ + 317, + 727, + 418, + 738 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 317, + 727, + 418, + 738 + ], + "type": "text", + "content": "https://github.com/s3prl/s3prl" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 738, + 487, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 738, + 487, + 748 + ], + "spans": [ + { + "bbox": [ + 317, + 738, + 487, + 748 + ], + "type": "text", + "content": "3https://github.com/TakHemlata/SSL_Anti-spoofing" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": 
[ + 558, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 106, + 569, + 261 + ], + "blocks": [ + { + "bbox": [ + 46, + 56, + 564, + 103 + ], + "lines": [ + { + "bbox": [ + 46, + 56, + 564, + 103 + ], + "spans": [ + { + "bbox": [ + 46, + 56, + 564, + 103 + ], + "type": "text", + "content": "TABLE III PERFORMANCE IN EER " + }, + { + "bbox": [ + 46, + 56, + 564, + 103 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 46, + 56, + 564, + 103 + ], + "type": "text", + "content": " ON THE CTRSVDD EVALUATION SET [46] WITH WAVLM [3] FRONT-END. RESULTS ARE SHOWN AS 'BEST (MEAN)' OVER 3 RUNS. PARAMETERS. AND MMACs REFER TO NUMBER OF PARAMETERS AND MILLION MULTIPLY-ACCUMULATE OPERATIONS, RESPECTIVELY. W/O AND W/ ACE B.F. REFER TO 'WITHOUT' AND 'WITH' ACESINGER BONA FIDE SAMPLES, RESPECTIVELY. ATTACK-SPECIFIC EERS ARE COMPUTED UNDER THE 'W/O ACE B.F' CONDITION. BEST RESULTS ARE IN BOLD; SECOND-BEST ARE UNDERlined.' " + }, + { + "bbox": [ + 46, + 56, + 564, + 103 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 46, + 56, + 564, + 103 + ], + "type": "text", + "content": " DENOTES IMPLEMENTATION CONDUCTED BY US." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 106, + 569, + 261 + ], + "lines": [ + { + "bbox": [ + 47, + 106, + 569, + 261 + ], + "spans": [ + { + "bbox": [ + 47, + 106, + 569, + 261 + ], + "type": "table", + "html": "
Back-end | Params. | MMACs | EER A9 | EER A10 | EER A11 | EER A12 | EER A13 | Pooled EER (w/o ACE. B.F.) | Pooled EER (w/ ACE. B.F.)
XWSB [39] ※ | - | - | - | - | - | - | - | - | 2.32
SLS [39] | - | - | - | - | - | - | - | - | 2.59
AASIST (C=32) [16] | 447k | 707.65 | - | - | - | - | - | - | 2.70
AASIST Light (C=24) † | 159k | 91.35 | 1.27 (1.37) | 0.87 (1.00) | 5.44 (5.86) | 4.84 (5.65) | 0.98 (1.05) | 3.95 (4.35) | 3.41 (3.77)
AASIST Standard (C=32) † | 447k | 707.65 | 1.18 (1.28) | 0.73 (0.86) | 3.63 (3.86) | 5.65 (5.77) | 0.88 (1.00) | 3.30 (3.36) | 2.79 (2.89)
AASIST Large (C=40) † | 662k | 1,091.28 | 1.32 (1.37) | 0.87 (0.97) | 3.70 (3.96) | 5.04 (5.63) | 0.96 (1.06) | 3.19 (3.36) | 2.71 (2.94)
AASIST XL (C=48) † | 835k | 1,555.56 | 1.23 (1.36) | 0.76 (0.92) | 3.40 (4.64) | 4.93 (5.55) | 0.89 (1.06) | 3.12 (3.62) | 2.76 (3.18)
AASIST XXL (C=56) † | 1,087k | 2,104.57 | 0.96 (1.20) | 0.66 (0.84) | 3.86 (4.15) | 4.83 (5.43) | 0.75 (0.95) | 3.05 (3.43) | 2.65 (2.95)
ResNet † | 611k | 70.62 | 1.18 (1.21) | 0.80 (0.93) | 3.97 (5.06) | 4.60 (4.86) | 0.96 (1.03) | 3.11 (3.61) | 2.74 (3.17)
Res2Net † | 452k | 64.93 | 1.26 (1.37) | 0.83 (0.86) | 3.59 (4.08) | 4.45 (4.80) | 1.08 (1.09) | 3.02 (3.24) | 2.61 (2.78)
ECAPA-TDNN (C=128) † | 497k | 80.21 | 1.18 (1.39) | 0.67 (0.85) | 4.47 (5.84) | 4.63 (4.96) | 0.87 (1.04) | 3.19 (3.74) | 2.79 (3.30)
Proposed Nes2Net | 511k | 58.11 | 1.23 (1.34) | 0.76 (0.81) | 2.40 (2.43) | 5.00 (5.24) | 0.96 (0.99) | 2.53 (2.55) | 2.22 (2.27)
Proposed Nes2Net-X | 511k | 91.35 | 1.21 (1.23) | 0.63 (0.76) | 2.09 (2.32) | 4.99 (5.24) | 0.83 (0.92) | 2.48 (2.51) | 2.20 (2.24)
", + "image_path": "bb3829ac42d9a686c979bc4e9bb61faab459c4bb9e22a76b5267ae278546f1d8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 47, + 262, + 534, + 270 + ], + "lines": [ + { + "bbox": [ + 47, + 262, + 534, + 270 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 534, + 270 + ], + "type": "inline_equation", + "content": "\\text{※}" + }, + { + "bbox": [ + 47, + 262, + 534, + 270 + ], + "type": "text", + "content": " XWSB is an ensemble-like model that combine two SSL front-ends [39], while all other models in Table III are based on single SSL front-end." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "spans": [ + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "text", + "content": "Adam optimizer [56] with a weight decay of " + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "text", + "content": ". To reproduce the AASIST baseline [15], we reduce the original batch size from 14 to 8 due to GPU memory constraints, and halve the learning rate from " + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-7}" + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "text", + "content": ". For Nes2Net, benefiting from its lower GPU memory consumption, we use a batch size of 12 with a learning rate of " + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "inline_equation", + "content": "2.5 \\times 10^{-7}" + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "text", + "content": ". The loss function used is weighted Cross Entropy. Following [15], we apply Rawboost augmentations [55], specifically 'series: " + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "inline_equation", + "content": "(1 + 2 + 3)" + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "text", + "content": "' (Algo4) and 'series: " + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "inline_equation", + "content": "(1 + 2)" + }, + { + "bbox": [ + 47, + 284, + 299, + 438 + ], + "type": "text", + "content": "' (Algo5), for AASIST baselines. For the proposed Nes2Net-X, only the former augmentation is applied. All models are trained for 100 epochs and the best checkpoint on the validation set is used for testing on the ASVspoof 2021 [48] and In-the-Wild [50] datasets." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 439, + 299, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 439, + 299, + 521 + ], + "spans": [ + { + "bbox": [ + 47, + 439, + 299, + 521 + ], + "type": "text", + "content": "ASVspoof 5: Both our AASIST baseline and the proposed Nes2Net-X models are trained using settings similar to those used for AASIST in the ASVspoof 2019 corpus. However, several differences apply. 
The final learning rate is set to " + }, + { + "bbox": [ + 47, + 439, + 299, + 521 + ], + "type": "inline_equation", + "content": "1 \times 10^{-7}" + }, + { + "bbox": [ + 47, + 439, + 299, + 521 + ], + "type": "text", + "content": "; we apply data augmentation using MUSAN [57] and RIR [58]; and training is stopped if there is no improvement on the development set for 5 consecutive epochs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 522, + 299, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 522, + 299, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 522, + 299, + 677 + ], + "type": "text", + "content": "PartialSpoof: For models trained on the PartialSpoof dataset [51], we follow the baseline systems described in [51], " + }, + { + "bbox": [ + 47, + 522, + 299, + 677 + ], + "type": "inline_equation", + "content": "[59]^4" + }, + { + "bbox": [ + 47, + 522, + 299, + 677 + ], + "type": "text", + "content": ". Specifically, we use wav2vec 2.0 as the front-end, the MSE for P2SGrad [60] as the loss function, and Adam [56] as the optimizer. Following [59], the batch size is set to 2, and a learning rate of " + }, + { + "bbox": [ + 47, + 522, + 299, + 677 + ], + "type": "inline_equation", + "content": "2.5 \times 10^{-6}" + }, + { + "bbox": [ + 47, + 522, + 299, + 677 + ], + "type": "text", + "content": " is adopted for the baseline systems. For the proposed Nes2Net and Nes2Net-X, the learning rate is set to " + }, + { + "bbox": [ + 47, + 522, + 299, + 677 + ], + "type": "inline_equation", + "content": "1 \times 10^{-5}" + }, + { + "bbox": [ + 47, + 522, + 299, + 677 + ], + "type": "text", + "content": ". The pooling layer used for the proposed Nes2Net and Nes2Net-X is the Attentive Statistics Pooling [42], and the reduction ratio of the SE module is set to 8. Training is terminated if no improvement is observed on the development set for 20 consecutive epochs. The epoch yielding the best performance on the development set is used for testing." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 691, + 234, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 691, + 234, + 700 + ], + "spans": [ + { + "bbox": [ + 113, + 691, + 234, + 700 + ], + "type": "text", + "content": "V. RESULTS AND ANALYSIS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 705, + 299, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 705, + 299, + 728 + ], + "spans": [ + { + "bbox": [ + 47, + 705, + 299, + 728 + ], + "type": "text", + "content": "All Equal Error Rate (EER) results in this work are reported as 'best (mean)' over multiple runs. For cited results that (1)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 311, + 285, + 563, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 285, + 563, + 307 + ], + "spans": [ + { + "bbox": [ + 311, + 285, + 563, + 307 + ], + "type": "text", + "content": "are based on a single run, (2) report only the best result, or (3) lack sufficient details, only a single value is presented." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 326, + 458, + 337 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 326, + 458, + 337 + ], + "spans": [ + { + "bbox": [ + 310, + 326, + 458, + 337 + ], + "type": "text", + "content": "A. 
Studies on the CtrSVDD dataset" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "spans": [ + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "text", + "content": "We conduct experiments on the CtrSVDD dataset [46], following two testing protocols: one including ACESinger bona fide samples and the other excluding them [38]. While results for both protocols are reported in Table III, our primary analysis focuses on the scenario 'without ACESinger bona fide (w/o ACE. B.F.)', as recommended by the dataset creators. Since AASIST " + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "inline_equation", + "content": "(\\mathrm{C} = 32)" + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "text", + "content": " in our prior work [16], as well as SLS and XWSB [39], were evaluated during the CtrSVDD Challenge 2024, portions of their test sets differ from the current official protocol. As a result, the EER by attack type is not directly comparable. To ensure a fair comparison, we re-implemented the AASIST " + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "inline_equation", + "content": "(\\mathrm{C} = 32)" + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "text", + "content": " system under the official protocol and used it as our baseline, referred to as AASIST Standard " + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "inline_equation", + "content": "(\\mathrm{C} = 32)" + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "text", + "content": " in Table III, achieving an EER of " + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "inline_equation", + "content": "2.79\\%" + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "text", + "content": " which is close to the originally reported " + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "inline_equation", + "content": "2.70\\%" + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "text", + "content": " [16]. Under the 'w/o ACE B.F.' condition, the best run achieves an EER of " + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "inline_equation", + "content": "3.30\\%" + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "text", + "content": " with an average of " + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "inline_equation", + "content": "3.36\\%" + }, + { + "bbox": [ + 310, + 341, + 563, + 568 + ], + "type": "text", + "content": " across three runs. Further experiments show that scaling up the AASIST model does not improve mean EER, possibly due to parameter redundancy." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": "We additionally evaluate several widely-used baseline systems, including ResNet [19], Res2Net [18], and ECAPATDNN [34]. 
ECAPA-TDNN and ResNet achieve EERs of " + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "inline_equation", + "content": "3.74\\%" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "inline_equation", + "content": "3.61\\%" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": ", respectively, which are slightly worse than that of AASIST. In contrast, Res2Net benefits from the advantages of multi-scale feature extraction, delivering the best average performance among the baseline systems with an EER of " + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "inline_equation", + "content": "3.24\\%" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": ". Our proposed Nes2Net outperforms all baseline systems, achieving a mean EER of " + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "inline_equation", + "content": "2.55\\%" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": " with the lowest computational cost. Furthermore, the enhanced version, Nes2Net-X, further improves the performance to " + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "inline_equation", + "content": "2.51\\%" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": " EER, marking the best single-model performance reported to date. Compared to Res2Net, ResNet, ECAPA-TDNN, and SOTA AASIST (" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "inline_equation", + "content": "C = 32" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": "), Nes2Net-X achieves EER reductions of " + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "inline_equation", + "content": "23\\%" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "inline_equation", + "content": "33\\%" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 310, + 569, + 563, + 747 + ], + "type": "text", + "content": ", respectively." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 276, + 32 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 276, + 32 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 276, + 32 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 738, + 217, + 747 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 738, + 217, + 747 + ], + "spans": [ + { + "bbox": [ + 55, + 738, + 217, + 747 + ], + "type": "text", + "content": "4https://github.com/nii-yamagishilab/PartialSpoof" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 101, + 561, + 294 + ], + "blocks": [ + { + "bbox": [ + 46, + 57, + 563, + 94 + ], + "lines": [ + { + "bbox": [ + 46, + 57, + 563, + 94 + ], + "spans": [ + { + "bbox": [ + 46, + 57, + 563, + 94 + ], + "type": "text", + "content": "TABLE IV PERFORMANCE IN EER " + }, + { + "bbox": [ + 46, + 57, + 563, + 94 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 46, + 57, + 563, + 94 + ], + "type": "text", + "content": " ON THE CTRSVDD EVALUATION SET [46], COMPARING THE PROPOSED NES2NET WITH RES2NET AND ITS VARIOUS VARIANTS. THE RESULTS ARE REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS, E.G., 3.02 (3.24) IN THE FIRST ROW, OR AS THE RESULT OF A SINGLE EXPERIMENT, E.G., 3.21 IN THE SECOND ROW. 'B' AND 'S' REPRESENT THE NUMBER OF BLOCKS AND SCALE OF RES2NET, RESPECTIVELY." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 101, + 561, + 294 + ], + "lines": [ + { + "bbox": [ + 47, + 101, + 561, + 294 + ], + "spans": [ + { + "bbox": [ + 47, + 101, + 561, + 294 + ], + "type": "table", + "html": "
Back-end | Dimensionality Reduction Layer | Reduced Dimension D | Params. | MMACs | Pooled EER (w/o ACE. B.F.) | Pooled EER (w/ ACE. B.F.) | Remarks
Res2Net (b=4, s=4) | ✓ | 128 | 452k | 64.93 | 3.02 (3.24) | 2.61 (2.78) |
Res2Net (b=4, s=16) | ✓ | 128 | 427k | 59.95 | 3.21 | 2.80 | increase scale s
Res2Net (b=4, s=64) | ✓ | 128 | 419k | 58.28 | 3.15 | 2.74 |
Res2Net (b=4, s=128) | ✓ | 128 | 417k | 57.98 | 3.26 | 2.88 |
Res2Net (b=4, s=4) | ✓ | 64 | 180k | 23.25 | 4.32 | 3.76 | change D
Res2Net (b=4, s=4) | ✓ | 256 | 1,273k | 202.91 | 3.83 | 3.38 |
Res2Net-woDR (b=1, s=4) | × | - | 861k | 119.15 | 4.15 | 3.62 |
Res2Net-woDR (b=1, s=8) | × | - | 615k | 70.12 | 4.23 | 3.71 |
Res2Net-woDR (b=1, s=16) | × | - | 456k | 38.24 | 3.82 | 3.35 | remove dimensionality reduction layer and increase scale s
Res2Net-woDR (b=1, s=32) | × | - | 367k | 20.45 | 2.98 (3.45) | 2.56 (3.02) |
Res2Net-woDR (b=1, s=64) | × | - | 320k | 11.10 | 2.73 (2.97) | 2.42 (2.61) |
Res2Net-woDR (b=1, s=128) | × | - | 296k | 6.31 | 3.29 | 2.88 |
Res2Net-woDR (b=1, s=256) | × | - | 284k | 3.88 | 3.57 | 3.13 |
Res2Net-woDR (b=2, s=64) | × | - | 637k | 21.78 | 3.20 | 2.82 | increase depth
Res2Net-woDR (b=4, s=64) | × | - | 1,270k | 43.15 | 3.09 (3.18) | 2.73 (2.83) |
Proposed Nes2Net | × | - | 511k | 58.11 | 2.53 (2.55) | 2.22 (2.27) | proposed nested design
Proposed Nes2Net-X | × | - | 511k | 91.35 | 2.48 (2.51) | 2.20 (2.24) |
", + "image_path": "3cc1916acf788294e6c8ae9cfda94679fa734b07114b7b845c6ca02cdef7c997.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 306, + 301, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 306, + 301, + 402 + ], + "spans": [ + { + "bbox": [ + 45, + 306, + 301, + 402 + ], + "type": "text", + "content": "We also analyze performance across different synthetic attack types using the 'w/o ACE B.F.' protocol. Except for the 'A12' attack type [46], our model consistently achieves either the best or second-best performance, demonstrating strong generalization and robustness. Notably, the 'A12' attack type, based on Singing Voice Synthesis (SVS), proves particularly challenging, showing higher EER across all models and highlighting a potential area for future improvement." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 402, + 301, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 402, + 301, + 486 + ], + "spans": [ + { + "bbox": [ + 45, + 402, + 301, + 486 + ], + "type": "text", + "content": "We observe that performance trends are consistent across both conditions, with and without ACESinger bona fide samples. Moreover, the EER is lower when ACESinger bona fide samples are included. This indicates that, even though ACESinger bona fide samples are considered out-of-domain, the trained models exhibit strong generalization capabilities and are able to classify these samples accurately." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 495, + 185, + 507 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 495, + 185, + 507 + ], + "spans": [ + { + "bbox": [ + 45, + 495, + 185, + 507 + ], + "type": "text", + "content": "B. The Roadmap of the Nes2Net" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 510, + 300, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 510, + 300, + 605 + ], + "spans": [ + { + "bbox": [ + 45, + 510, + 300, + 605 + ], + "type": "text", + "content": "In this section, we introduce the roadmap from Res2Net to the proposed Nes2Net, with detailed results summarized in Table IV. All systems are implemented and evaluated under a unified framework for fair comparison. To aid interpretation, we visualize the number of parameters, MACs, and EER. These are represented in Fig. 3 by circle size, the horizontal axis, and the vertical axis, respectively. In the following, we provide detailed analyses:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 605, + 301, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 605, + 301, + 724 + ], + "spans": [ + { + "bbox": [ + 45, + 605, + 301, + 724 + ], + "type": "text", + "content": "Investigating Res2Net: Among the baselines in Table III, the Res2Net-based back-end outperforms ResNet, AASIST, and ECAPA-TDNN on the CtrlSVDD dataset. Therefore, we select it as the reference baseline for further investigation. First, we experiment with adjusting the scale " + }, + { + "bbox": [ + 45, + 605, + 301, + 724 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 45, + 605, + 301, + 724 + ], + "type": "text", + "content": " of Res2Net. 
We observe that as " + }, + { + "bbox": [ + 45, + 605, + 301, + 724 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 45, + 605, + 301, + 724 + ], + "type": "text", + "content": " increases, the number of split groups increases linearly; however, the performance shows no significant improvement (depicted as the teal blue line in Fig. 3). This may be because adding too many split groups dilutes the feature representation, leading to redundancy." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "content": "Next, we explore varying the dimensionality of the output features from the DR layer (referred to as Reduced Dimension" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 305, + 564, + 596 + ], + "blocks": [ + { + "bbox": [ + 310, + 305, + 564, + 596 + ], + "lines": [ + { + "bbox": [ + 310, + 305, + 564, + 596 + ], + "spans": [ + { + "bbox": [ + 310, + 305, + 564, + 596 + ], + "type": "image", + "image_path": "b205df6253934c495cd6bf54f649c643eae158455b8a5a57b5838ccd4e16a70f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 599, + 564, + 620 + ], + "lines": [ + { + "bbox": [ + 308, + 599, + 564, + 620 + ], + "spans": [ + { + "bbox": [ + 308, + 599, + 564, + 620 + ], + "type": "text", + "content": "Fig. 3. Visualization of Table III and IV, highlighting our exploration of Res2Net and the roadmap of architectural changes leading to Nes2Net." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "spans": [ + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "text", + "content": ", depicted as the steel gray line in Fig. 3). Reducing " + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "text", + "content": " to 64 significantly lowers model size and MACs, compared to the default " + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "inline_equation", + "content": "D = 128" + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "text", + "content": ", but leads to substantial performance degradation, increasing EER from " + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "inline_equation", + "content": "3.02\\%" + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "inline_equation", + "content": "4.32\\%" + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "text", + "content": ". 
Conversely, increasing " + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "text", + "content": " to 256 results in a much larger model size and MACs but still leads to worse performance than " + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "inline_equation", + "content": "D = 128" + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "text", + "content": ". This may be because a larger " + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "text", + "content": " introduces over-parameterization and noise. This may explain why " + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "inline_equation", + "content": "D = 128" + }, + { + "bbox": [ + 307, + 628, + 564, + 736 + ], + "type": "text", + "content": " is commonly adopted in SOTA models [15], [16]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "content": "Removal of DR Layer: Foundation models often incorpo" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 150 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 150 + ], + "type": "text", + "content": "rate a DR layer in their back-end architecture to compress high-dimensional features into lower-dimensional representations, facilitating downstream tasks. For instance, models like wav2vec 2.0-AASIST [15] utilize such a layer alongside task-specific classifiers (e.g., AASIST, ResNet). However, as discussed in Section II-E, this projection layer consumes a substantial portion of the back-end model's parameters and MACs while potentially causing information loss." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 151, + 301, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 151, + 301, + 247 + ], + "spans": [ + { + "bbox": [ + 45, + 151, + 301, + 247 + ], + "type": "text", + "content": "To explore whether bypassing this layer preserves more task-relevant information, we propose a new back-end model: ResNet without Dimensionality Reduction (ResNet-woDR). By directly processing high-dimensional features, ResNet-woDR simplifies the architecture and focuses on the raw features extracted by the speech foundation model. The naming emphasizes the absence of a DR layer, differentiating it from traditional approaches." 
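To make the contrast concrete, the sketch below compares the two pipelines. It is a minimal PyTorch illustration with assumed shapes (the 1024-dim input matches wav2vec 2.0-style front-ends, and D = 128 matches the common default discussed above); it is not the released implementation.

```python
# Minimal sketch (PyTorch; sizes are illustrative assumptions): a conventional
# back-end first projects 1024-dim foundation-model features down to D = 128,
# whereas the woDR variant consumes all 1024 channels directly.
import torch
import torch.nn as nn

ssl_feats = torch.randn(8, 1024, 400)            # (batch, SSL dim, frames)

# Conventional: dimensionality-reduction (DR) layer before the classifier.
dr_layer = nn.Conv1d(1024, 128, kernel_size=1)
print(sum(p.numel() for p in dr_layer.parameters()))  # 131,200 params for DR alone
reduced = dr_layer(ssl_feats)                    # (8, 128, 400): features funneled

# woDR: no projection; a grouped conv keeps cost manageable at full width.
wodr = nn.Conv1d(1024, 1024, kernel_size=3, padding=1, groups=64)
direct = wodr(ssl_feats)                         # (8, 1024, 400): full width kept
```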
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "spans": [ + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "type": "text", + "content": "We further evaluate the performance of ResNet-woDR with different scales " + }, + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "type": "text", + "content": " (depicted as the green line in Fig. 3). The best performance is observed with " + }, + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "type": "inline_equation", + "content": "s = 64" + }, + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "type": "text", + "content": ", achieving a mean EER of " + }, + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "type": "inline_equation", + "content": "2.97\\%" + }, + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "type": "text", + "content": ", which surpasses the best Res2Net baseline. Increasing " + }, + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 45, + 247, + 301, + 320 + ], + "type": "text", + "content": " beyond this point leads to a decline in performance, likely due to the following factors:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 324, + 301, + 456 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 55, + 324, + 301, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 324, + 301, + 360 + ], + "spans": [ + { + "bbox": [ + 55, + 324, + 301, + 360 + ], + "type": "text", + "content": "- Feature Dilution. A large " + }, + { + "bbox": [ + 55, + 324, + 301, + 360 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 55, + 324, + 301, + 360 + ], + "type": "text", + "content": " excessively fragments feature representations, weakening their expressiveness and resulting in diluted, less informative features [18]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 360, + 301, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 360, + 301, + 396 + ], + "spans": [ + { + "bbox": [ + 56, + 360, + 301, + 396 + ], + "type": "text", + "content": "- Redundant Transformations. An overly large " + }, + { + "bbox": [ + 56, + 360, + 301, + 396 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 56, + 360, + 301, + 396 + ], + "type": "text", + "content": " introduces unnecessary feature transformations, leading to overfitting and reduced generalization [43]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 396, + 301, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 396, + 301, + 456 + ], + "spans": [ + { + "bbox": [ + 56, + 396, + 301, + 456 + ], + "type": "text", + "content": "- Restricted Feature Interaction. Since channels are unordered, distant groups may still contain correlated information. In this case, the additional convolutional layers introduced by splitting limit their interactions, weakening the model's ability to capture complex patterns." 
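The fragmentation effect is easy to see in code. Below is a minimal, simplified sketch of the hierarchical channel split that the scale s controls (normalization, activations, and residual wrappers omitted); it is illustrative, not the paper's exact block.

```python
import torch
import torch.nn as nn

class SplitConv(nn.Module):
    """Res2Net-style split: C channels -> s groups of C//s, each with its own
    conv, with the running output passed on to the next group (hierarchical)."""
    def __init__(self, channels: int, scale: int):
        super().__init__()
        assert channels % scale == 0
        self.scale = scale
        width = channels // scale
        self.convs = nn.ModuleList(
            nn.Conv1d(width, width, kernel_size=3, padding=1)
            for _ in range(scale - 1)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        chunks = list(torch.chunk(x, self.scale, dim=1))
        out = [chunks[0]]                    # first group passes through
        prev = chunks[0]
        for conv, c in zip(self.convs, chunks[1:]):
            prev = conv(c + prev)            # hierarchical connection
            out.append(prev)
        return torch.cat(out, dim=1)

x = torch.randn(8, 1024, 400)
print(SplitConv(1024, scale=64)(x).shape)    # each group sees only 16 channels
```

With s = 64, each conv operates on just 1024/64 = 16 channels, which illustrates why very large scales can dilute the representation.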
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 45, + 459, + 301, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 459, + 301, + 543 + ], + "spans": [ + { + "bbox": [ + 45, + 459, + 301, + 543 + ], + "type": "text", + "content": "Based on the optimal " + }, + { + "bbox": [ + 45, + 459, + 301, + 543 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 45, + 459, + 301, + 543 + ], + "type": "text", + "content": ", we increase the number of blocks " + }, + { + "bbox": [ + 45, + 459, + 301, + 543 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 45, + 459, + 301, + 543 + ], + "type": "text", + "content": " to deepen the model (depicted as the light pink line in Fig. 3). However, no further performance improvement is observed. This could be attributed to the deeper architecture's limited ability to effectively utilize the additional parameters, resulting in diminishing performance gains. It may also increase the risk of overfitting." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 544, + 301, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 544, + 301, + 628 + ], + "spans": [ + { + "bbox": [ + 45, + 544, + 301, + 628 + ], + "type": "text", + "content": "The Novel Nested Design: Prior experiments demonstrate that removing the DR layer enhances the performance of Res2Net. We believe that directly extracting information from high-dimensional speech foundation model features avoids the information loss introduced by DR. Our experiments with variations in scale, depth, and dimensionality show that a mean EER of " + }, + { + "bbox": [ + 45, + 544, + 301, + 628 + ], + "type": "inline_equation", + "content": "2.97\\%" + }, + { + "bbox": [ + 45, + 544, + 301, + 628 + ], + "type": "text", + "content": " marks a performance bottleneck for this design." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 628, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 628, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 45, + 628, + 301, + 748 + ], + "type": "text", + "content": "Compared to ResNet-woDR, the proposed Nes2Net adopts a novel nested design that enhances flexibility and significantly boosts the model's representational capacity. Processing larger feature subsets in the outer layer facilitates better interactions across channels within each nested layer. Furthermore, the integrated local cross-channel attention mechanism enhances feature selection while mitigating redundancy, addressing limitations in prior designs. This architectural refinement overcomes the performance limitations observed in the original Res2Net design. As a result, Nes2Net and its enhanced variant" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 54, + 564, + 79 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 54, + 564, + 79 + ], + "spans": [ + { + "bbox": [ + 308, + 54, + 564, + 79 + ], + "type": "text", + "content": "Nes2Net-X surpass the earlier performance bottleneck, achieving mean EERs of " + }, + { + "bbox": [ + 308, + 54, + 564, + 79 + ], + "type": "inline_equation", + "content": "2.55\\%" + }, + { + "bbox": [ + 308, + 54, + 564, + 79 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 54, + 564, + 79 + ], + "type": "inline_equation", + "content": "2.51\\%" + }, + { + "bbox": [ + 308, + 54, + 564, + 79 + ], + "type": "text", + "content": ", respectively." 
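The gist of the nested idea can be sketched as follows. This is an illustrative simplification under our own assumptions: an ECA-style 1-D convolution over channel descriptors stands in for the local cross-channel attention, and the inner unit reuses the hierarchical split from above. The released code should be consulted for the exact Nes2Net and Nes2Net-X blocks.

```python
import torch
import torch.nn as nn

class InnerRes2(nn.Module):
    # Inner multi-scale unit acting on one outer subset (simplified).
    def __init__(self, channels: int, scale: int = 4):
        super().__init__()
        width = channels // scale
        self.scale = scale
        self.convs = nn.ModuleList(
            nn.Conv1d(width, width, 3, padding=1) for _ in range(scale - 1))

    def forward(self, x):
        parts = list(torch.chunk(x, self.scale, dim=1))
        out, prev = [parts[0]], parts[0]
        for conv, p in zip(self.convs, parts[1:]):
            prev = conv(p + prev)
            out.append(prev)
        return torch.cat(out, dim=1)

class NestedBlock(nn.Module):
    """Outer split into a few *large* subsets -> inner multi-scale units,
    followed by a light local cross-channel reweighting (ECA-style 1-D conv
    over per-channel averages). Illustrative only, not the exact Nes2Net."""
    def __init__(self, channels: int = 1024, outer: int = 4):
        super().__init__()
        self.outer = outer
        self.inners = nn.ModuleList(
            InnerRes2(channels // outer) for _ in range(outer))
        self.attn = nn.Conv1d(1, 1, kernel_size=5, padding=2)

    def forward(self, x):
        subs = torch.chunk(x, self.outer, dim=1)
        y = torch.cat([m(s) for m, s in zip(self.inners, subs)], dim=1)
        # Channel descriptors (B, 1, C) -> local attention -> reweight channels.
        w = self.attn(y.mean(dim=2, keepdim=True).transpose(1, 2))
        return y * torch.sigmoid(w).transpose(1, 2)

print(NestedBlock()(torch.randn(2, 1024, 400)).shape)  # torch.Size([2, 1024, 400])
```

Because the outer split keeps subsets wide (256 channels here instead of 16), each inner unit can model richer within-subset interactions before the attention step selects across channels.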
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 93, + 484, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 93, + 484, + 105 + ], + "spans": [ + { + "bbox": [ + 309, + 93, + 484, + 105 + ], + "type": "text", + "content": "C. Studies on the ASVspoof 2021 dataset" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "spans": [ + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "text", + "content": "TABLE V PERFORMANCE IN EER " + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "text", + "content": " ON THE ASVspoof 2021 LA AND DF. THE RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN). CKPT AVG. REFERS TO THE NUMBER OF CHECKPOINTS AVERAGED. " + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "text", + "content": " DENOTES RE-IMPLEMENTATION CONDUCTED BY US. 'ALGO4' AND 'ALGO5' REPRESENT RAWBOOST SERIES AUGMENTATIONS: " + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "inline_equation", + "content": "(1 + 2 + 3)" + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "text", + "content": " AND " + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "inline_equation", + "content": "(1 + 2)" + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "text", + "content": " [55], RESPECTIVELY. PARAMETERS THAT ARE UNDERlined ARE CALCULATED BY US. " + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "inline_equation", + "content": "-" + }, + { + "bbox": [ + 310, + 110, + 563, + 192 + ], + "type": "text", + "content": " REPRESENTS UNKNOWN. N/A INDICATES THAT THE SYSTEM DOES NOT USE THE AVERAGE CHECKPOINTS METHOD." + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 309, + 195, + 564, + 467 + ], + "blocks": [ + { + "bbox": [ + 309, + 195, + 564, + 467 + ], + "lines": [ + { + "bbox": [ + 309, + 195, + 564, + 467 + ], + "spans": [ + { + "bbox": [ + 309, + 195, + 564, + 467 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Remark</td><td rowspan="2">Front-end</td><td rowspan="2">Back-end Model</td><td rowspan="2">Back-end Parameters</td><td rowspan="2">CKPT Avg.</td><td colspan="2">ASVspoof 2021</td></tr>
<tr><td>LA</td><td>DF</td></tr>
<tr><td>2022</td><td>wav2vec2.0</td><td>FIR-NB [61]</td><td>-</td><td>-</td><td>3.54</td><td>6.18</td></tr>
<tr><td>2022</td><td>wav2vec2.0</td><td>FIR-WB [61]</td><td>-</td><td>-</td><td>7.08</td><td>4.98</td></tr>
<tr><td>2022</td><td>wav2vec2.0</td><td>LGF [62]</td><td>-</td><td>-</td><td>9.66</td><td>4.75</td></tr>
<tr><td>2023</td><td>wav2vec2.0</td><td>Conformer (fix) [63]</td><td>2,506k<sup>5</sup></td><td rowspan="2">5</td><td>1.38</td><td>2.27</td></tr>
<tr><td>2023</td><td>wav2vec2.0</td><td>Conformer (var) [63]</td><td>2,506k<sup>5</sup></td><td>0.87</td><td>7.36</td></tr>
<tr><td>2024</td><td>wav2vec2.0</td><td>Ensembling [64] ‡</td><td>-</td><td>-</td><td>2.32 (4.48)</td><td>5.60 (8.74)</td></tr>
<tr><td>2024</td><td>WavLM</td><td>ASP+MLP [65]</td><td>1,051k</td><td>-</td><td>3.31</td><td>4.47</td></tr>
<tr><td>2024</td><td>wav2vec2.0</td><td>SLIM [14]</td><td>-</td><td>-</td><td>-</td><td>(4.4)</td></tr>
<tr><td>2024</td><td>WavLM</td><td>AttM-LSTM [31]</td><td>936k<sup>6</sup></td><td>N/A</td><td>3.50</td><td>3.19</td></tr>
<tr><td>2024</td><td>wav2vec2.0</td><td>FTDKD [66]</td><td>-</td><td>-</td><td>2.96</td><td>2.82</td></tr>
<tr><td>2024</td><td>wav2vec2.0</td><td>AASIST2 [67]</td><td>-</td><td>-</td><td>1.61</td><td>2.77</td></tr>
<tr><td>2024</td><td>wav2vec2.0</td><td>MFA [68]</td><td>-</td><td>-</td><td>5.08</td><td>2.56</td></tr>
<tr><td>2024</td><td>wav2vec2.0</td><td>MoE [69]</td><td>-</td><td>-</td><td>2.96</td><td>2.54</td></tr>
<tr><td>2024</td><td>wav2vec2.0</td><td>OCKD [70]</td><td>-</td><td>-</td><td>0.90</td><td>2.27</td></tr>
<tr><td>2024</td><td>wav2vec2.0</td><td>TCM [33]</td><td>2,383k<sup>7</sup></td><td>5</td><td>1.03</td><td>2.06</td></tr>
<tr><td>2024</td><td>wav2vec2.0</td><td>SLS [35]</td><td>23,399k<sup>8</sup></td><td>-</td><td>2.87 (3.88)</td><td>1.92 (2.09)</td></tr>
<tr><td>2025</td><td>wav2vec2.0</td><td>LSR+LSA [71]</td><td>-</td><td>-</td><td>1.19</td><td>2.43</td></tr>
<tr><td>2025</td><td>wav2vec2.0</td><td>LSR+LSA [71] ※</td><td>-</td><td>-</td><td>1.05</td><td>1.86</td></tr>
<tr><td>2025</td><td>wav2vec2.0</td><td>WaveSpec [72]</td><td>-</td><td>-</td><td>-</td><td>1.90</td></tr>
<tr><td>2025</td><td>wav2vec2.0</td><td>Mamba [17]</td><td>1,937k<sup>9</sup></td><td>5</td><td>0.93</td><td>1.88</td></tr>
<tr><td>2025</td><td>wav2vec2.0</td><td>SSL-EOW-S. [73] ‡</td><td>-</td><td>-</td><td>-</td><td>1.75 (2.91)</td></tr>
<tr><td>2025</td><td>wav2vec2.0</td><td>Cal. Ensemble [73] ‡</td><td>-</td><td>-</td><td>-</td><td>(2.03)</td></tr>
<tr><td rowspan="3">2022</td><td>wav2vec2.0</td><td>AASIST [15]</td><td>447k<sup>10</sup></td><td>N/A</td><td>0.82 (1.00)</td><td>2.85 (3.69)</td></tr>
<tr><td>wav2vec2.0</td><td>AASIST (algo4)</td><td>447k</td><td>N/A</td><td>1.13 (1.36)</td><td>3.37 (4.09)</td></tr>
<tr><td>wav2vec2.0</td><td>AASIST (algo5)</td><td>447k</td><td>N/A</td><td>0.93 (1.40)</td><td>3.56 (5.07)</td></tr>
<tr><td rowspan="4">Ours</td><td>wav2vec2.0</td><td>Nes2Net</td><td>511k</td><td>N/A</td><td>1.61 (1.90)</td><td>1.89 (2.12)</td></tr>
<tr><td>wav2vec2.0</td><td>Nes2Net-X</td><td>511k</td><td>N/A</td><td>1.73 (1.95)</td><td>1.65 (1.91)</td></tr>
<tr><td>wav2vec2.0</td><td>Nes2Net-X</td><td>511k</td><td>3</td><td>1.66 (1.87)</td><td>1.54 (1.98)</td></tr>
<tr><td>wav2vec2.0</td><td>Nes2Net-X</td><td>511k</td><td>5</td><td>1.88 (2.00)</td><td>1.49 (1.78)</td></tr>
", + "image_path": "8e23480718b64a36e60a649ebac4e58f7fde3d839dc800ff33cb052003b55e25.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 310, + 466, + 419, + 482 + ], + "lines": [ + { + "bbox": [ + 310, + 466, + 419, + 482 + ], + "spans": [ + { + "bbox": [ + 310, + 466, + 419, + 482 + ], + "type": "text", + "content": "\\*: with extra data augmentation [71] " + }, + { + "bbox": [ + 310, + 466, + 419, + 482 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 310, + 466, + 419, + 482 + ], + "type": "text", + "content": " : ensemble of multiple models" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 488, + 564, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 488, + 564, + 643 + ], + "spans": [ + { + "bbox": [ + 307, + 488, + 564, + 643 + ], + "type": "text", + "content": "The ASVspoof series datasets are widely used as benchmarks for advancing research in detecting spoofed speech [47], [48]. Following the standard protocol, we train models on ASVspoof 2019 [47] and evaluate them on ASVspoof 2021 Logical Access (LA) and Deepfake (DF) tasks [48]. The LA task focuses on detecting synthetic and voice-converted speech transmitted over telephony systems, introducing challenges related to channel effects and transmission variability. In contrast, the DF task targets detecting manipulated, compressed speech data commonly found on online platforms. This reflects real-world scenarios where deepfake audio circulates, making the DF task a valuable benchmark for evaluating deepfake detection systems." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 643, + 564, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 643, + 564, + 680 + ], + "spans": [ + { + "bbox": [ + 308, + 643, + 564, + 680 + ], + "type": "text", + "content": "The results in Table V show that for the LA track, our Nes2Net achieves a mean EER of " + }, + { + "bbox": [ + 308, + 643, + 564, + 680 + ], + "type": "inline_equation", + "content": "1.90\\%" + }, + { + "bbox": [ + 308, + 643, + 564, + 680 + ], + "type": "text", + "content": ", comparable to SOTA systems. 
For the DF track, which more closely reflects" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 687, + 557, + 748 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 315, + 687, + 557, + 698 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 687, + 557, + 698 + ], + "spans": [ + { + "bbox": [ + 315, + 687, + 557, + 698 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 315, + 687, + 557, + 698 + ], + "type": "text", + "content": "https://github.com/ErosRos/conformer-based-classifier-for-anti-spoofing" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 698, + 512, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 698, + 512, + 708 + ], + "spans": [ + { + "bbox": [ + 317, + 698, + 512, + 708 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 317, + 698, + 512, + 708 + ], + "type": "text", + "content": "https://github.com/pandartialdTJU/AttM_INTERSPEECH24" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 708, + 459, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 708, + 459, + 718 + ], + "spans": [ + { + "bbox": [ + 317, + 708, + 459, + 718 + ], + "type": "text", + "content": "7https://github.com/ductuantruong/tcm_add" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 318, + 718, + 519, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 718, + 519, + 727 + ], + "spans": [ + { + "bbox": [ + 318, + 718, + 519, + 727 + ], + "type": "inline_equation", + "content": "^{8}" + }, + { + "bbox": [ + 318, + 718, + 519, + 727 + ], + "type": "text", + "content": "https://github.com/QiShanZhang/SLSforASVspoof-2021-DF" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 318, + 727, + 466, + 738 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 727, + 466, + 738 + ], + "spans": [ + { + "bbox": [ + 318, + 727, + 466, + 738 + ], + "type": "text", + "content": "9https://github.com/swagshaw/XLSR-Mamba" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 319, + 738, + 490, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 738, + 490, + 748 + ], + "spans": [ + { + "bbox": [ + 319, + 738, + 490, + 748 + ], + "type": "inline_equation", + "content": "^{10}" + }, + { + "bbox": [ + 319, + 738, + 490, + 748 + ], + "type": "text", + "content": "https://github.com/TakHemlata/SSL_Anti-spoofing" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 97, + 563, + 437 + ], + "blocks": [ + { + "bbox": [ + 48, + 57, + 563, + 94 + ], + "lines": [ + { + "bbox": [ + 48, + 57, + 563, + 94 + ], + "spans": [ + { + "bbox": [ + 48, + 57, + 563, + 94 + ], + "type": "text", 
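All comparisons in these tables are reported in terms of EER, the operating point at which the false-acceptance and false-rejection rates are equal. As a reference for readers reproducing the numbers, a minimal NumPy sketch of EER computed from raw detection scores by a simple threshold sweep (toy data; not the evaluation toolkit used in the paper):

```python
import numpy as np

def compute_eer(bonafide_scores: np.ndarray, spoof_scores: np.ndarray) -> float:
    """EER: threshold where false-acceptance and false-rejection rates cross.
    Higher scores are assumed to mean 'more likely bona fide'."""
    thresholds = np.sort(np.concatenate([bonafide_scores, spoof_scores]))
    far = np.array([(spoof_scores >= t).mean() for t in thresholds])
    frr = np.array([(bonafide_scores < t).mean() for t in thresholds])
    idx = np.argmin(np.abs(far - frr))
    return float((far[idx] + frr[idx]) / 2)

rng = np.random.default_rng(0)
eer = compute_eer(rng.normal(2.0, 1.0, 1000), rng.normal(0.0, 1.0, 1000))
print(f"{eer:.2%}")  # roughly 16% for these overlapping toy distributions
```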
+ "content": "TABLE VI PERFORMANCE IN EER " + }, + { + "bbox": [ + 48, + 57, + 563, + 94 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 48, + 57, + 563, + 94 + ], + "type": "text", + "content": " FOR DIFFERENT TYPES OF VOCODERS AND COMPRESSION CONDITIONS ON THE ASVSPOOF 2021 DF TEST SET. THE FIVE EER VALUES FOR EACH SUB-ITEM, FROM LEFT TO RIGHT, CORRESPOND TO NES2NET-X, MAMBA [17], SLS [35], TCM [33], AND AASIST [15].THE BEST PERFORMANCE IS REPORTED IN BOLD FONTS, AND THE SECOND-BEST IS UNDERLINED." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 97, + 563, + 437 + ], + "lines": [ + { + "bbox": [ + 47, + 97, + 563, + 437 + ], + "spans": [ + { + "bbox": [ + 47, + 97, + 563, + 437 + ], + "type": "table", + "html": "
<tr><td colspan="2"></td><td>Traditional Vocoder</td><td>Wav Concatenation</td><td>Neural Autoreg.</td><td>Neural Non-autoreg.</td><td>Unknown</td><td>Pooled EER</td></tr>
<tr><td>C1</td><td>-</td><td>0.36/0.78/1.21/0.95/1.22</td><td>0.76/0.76/0.80/0.76/2.28</td><td>2.70/3.88/3.12/3.89/3.45</td><td>0.52/0.87/0.68/0.95/1.56</td><td>1.64/1.63/1.23/1.73/1.99</td><td>1.47/1.89/1.72/2.23/2.34</td></tr>
<tr><td>C2</td><td>Low mp3</td><td>1.48/0.94/1.94/1.67/2.72</td><td>2.96/2.20/2.16/2.56/5.84</td><td>2.89/3.23/2.71/3.59/5.96</td><td>1.23/0.86/0.78/1.32/3.33</td><td>2.54/1.69/1.65/1.93/4.30</td><td>1.75/1.84/2.02/2.11/4.30</td></tr>
<tr><td>C3</td><td>High mp3</td><td>0.44/0.88/1.39/0.96/1.83</td><td>1.13/1.49/1.17/1.45/3.35</td><td>2.47/3.35/2.91/3.70/3.79</td><td>0.44/0.87/0.69/0.88/2.02</td><td>2.29/1.85/1.34/1.67/2.65</td><td>1.32/1.85/1.59/1.95/2.64</td></tr>
<tr><td>C4</td><td>Low m4a</td><td>0.44/0.95/1.48/1.22/1.57</td><td>1.15/0.85/1.24/1.67/2.09</td><td>2.79/3.39/2.79/3.40/3.75</td><td>0.54/0.96/0.70/1.22/1.65</td><td>1.32/1.22/1.14/1.41/2.10</td><td>1.40/1.92/1.74/2.01/2.37</td></tr>
<tr><td>C5</td><td>High m4a</td><td>0.45/0.80/1.34/0.98/1.16</td><td>0.62/0.76/0.71/0.76/2.10</td><td>2.77/3.48/2.96/3.73/3.39</td><td>0.56/0.90/0.64/1.07/1.34</td><td>1.88/1.70/1.34/1.43/1.87</td><td>1.59/2.05/1.79/1.96/2.14</td></tr>
<tr><td>C6</td><td>Low ogg</td><td>0.69/1.13/2.14/1.44/2.35</td><td>0.80/0.97/0.91/0.91/2.23</td><td>1.92/2.80/2.44/2.79/3.67</td><td>0.48/0.78/0.61/0.84/1.62</td><td>1.05/1.14/1.00/1.01/2.23</td><td>1.09/1.61/1.88/1.87/2.58</td></tr>
<tr><td>C7</td><td>High ogg</td><td>0.70/1.13/1.52/1.35/1.57</td><td>0.62/0.80/0.71/0.80/1.50</td><td>2.05/2.84/2.26/2.66/2.92</td><td>0.43/0.65/0.52/0.74/1.00</td><td>1.34/1.05/0.96/0.96/1.27</td><td>1.35/1.61/1.57/1.74/1.92</td></tr>
<tr><td>C8</td><td>mp3→m4a</td><td>0.95/1.26/2.28/1.74/3.01</td><td>1.52/0.97/1.08/1.08/2.96</td><td>2.22/3.01/2.31/2.96/4.49</td><td>0.61/0.57/0.65/0.95/2.05</td><td>1.61/1.18/1.09/1.18/2.66</td><td>1.48/1.65/1.92/1.97/3.31</td></tr>
<tr><td>C9</td><td>ogg→m4a</td><td>0.70/1.26/2.15/1.49/2.28</td><td>0.88/0.97/0.99/0.88/2.52</td><td>1.92/3.01/2.57/2.88/3.76</td><td>0.52/0.70/0.65/0.78/1.57</td><td>0.96/1.09/1.09/1.05/2.14</td><td>1.13/1.79/2.04/1.88/2.75</td></tr>
<tr><td colspan="2">Pooled EER</td><td>0.72/1.14/1.88/1.40/2.15</td><td>1.10/1.05/1.07/1.14/2.85</td><td>2.70/3.32/2.86/3.40/4.05</td><td>0.63/0.80/0.69/0.94/1.84</td><td>1.86/1.43/1.23/1.38/2.45</td><td>1.49/1.88/1.92/2.06/2.85</td></tr>
", + "image_path": "aaf4389be4b7945f02c8bda6ea039fe9c5e53dd5e8bc867b886ce5c647d07ecd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 438, + 565, + 468 + ], + "lines": [ + { + "bbox": [ + 45, + 438, + 565, + 468 + ], + "spans": [ + { + "bbox": [ + 45, + 438, + 565, + 468 + ], + "type": "text", + "content": "Fig. 4. Visualization of the EER " + }, + { + "bbox": [ + 45, + 438, + 565, + 468 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 45, + 438, + 565, + 468 + ], + "type": "text", + "content": " across various vocoders and compression conditions on the ASVspoof 2021 DF test set. Each EER value is shown as a colored circle, where the size indicates the EER value, and the color represents the performance ranking among the five models: blue (best) to light red (worst). The five EER values for each sub-item, from left to right, correspond to the proposed Nes2Net-X, Mamba [17], SLS [35], TCM [33], and AASIST [15]." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "spans": [ + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "text", + "content": "real-world scenarios as discussed earlier, the baseline system AASIST [15] achieves its best EER of " + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "inline_equation", + "content": "2.85\\%" + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "text", + "content": " and a mean EER of " + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "inline_equation", + "content": "3.69\\%" + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "text", + "content": ", remaining competitive with current SOTA systems. The SLS [35] and TCM [33] models achieve EERs close to " + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "text", + "content": ", demonstrating strong performance at the SOTA level. The Mamba-based [17] model further improves results, reducing the EER to " + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "inline_equation", + "content": "1.88\\%" + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "text", + "content": ". Notably, our proposed Nes2Net attains its best EER of " + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "inline_equation", + "content": "1.89\\%" + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "text", + "content": " and a mean EER of " + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "inline_equation", + "content": "2.12\\%" + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "text", + "content": " EER, comparable to the performance of current SOTA systems. The enhanced variant, Nes2Net-X achieves the best performance among all compared systems, with its best EER of " + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "inline_equation", + "content": "1.65\\%" + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "text", + "content": " and a mean EER of " + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "inline_equation", + "content": "1.91\\%" + }, + { + "bbox": [ + 45, + 480, + 301, + 624 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 625, + 300, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 625, + 300, + 734 + ], + "spans": [ + { + "bbox": [ + 45, + 625, + 300, + 734 + ], + "type": "text", + "content": "Inspired by prior works [17], [33], we average the weights of several top-performing checkpoints on the validation set to obtain an improved model. This approach further improves the performance of the DF task to a best EER of " + }, + { + "bbox": [ + 45, + 625, + 300, + 734 + ], + "type": "inline_equation", + "content": "1.49\\%" + }, + { + "bbox": [ + 45, + 625, + 300, + 734 + ], + "type": "text", + "content": " and a mean EER of " + }, + { + "bbox": [ + 45, + 625, + 300, + 734 + ], + "type": "inline_equation", + "content": "1.78\\%" + }, + { + "bbox": [ + 45, + 625, + 300, + 734 + ], + "type": "text", + "content": ", which, to the best of our knowledge, is the best performance reported to date. Furthermore, compared to Mamba [17], our model achieves this performance with approximately " + }, + { + "bbox": [ + 45, + 625, + 300, + 734 + ], + "type": "inline_equation", + "content": "74\\%" + }, + { + "bbox": [ + 45, + 625, + 300, + 734 + ], + "type": "text", + "content": " fewer parameters, demonstrating superior efficiency." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 736, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 736, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 55, + 736, + 301, + 749 + ], + "type": "text", + "content": "The analysis above summarizes overall performance on the" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 479, + 564, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 479, + 564, + 576 + ], + "spans": [ + { + "bbox": [ + 307, + 479, + 564, + 576 + ], + "type": "text", + "content": "DF test set. The DF dataset also provides detailed labels for vocoder types and compression conditions, enabling more fine-grained analysis. To further evaluate performance, we compare the SOTA models Mamba, SLS, TCM, and AASIST with our proposed Nes2Net-X across these sub-tracks. The results are presented in Table VI. To improve readability and make the extensive numerical data easier to interpret, we also visualize the table's results in Fig. 4." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 578, + 564, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 578, + 564, + 734 + ], + "spans": [ + { + "bbox": [ + 307, + 578, + 564, + 734 + ], + "type": "text", + "content": "For traditional vocoders, all models perform well, with most EERs below " + }, + { + "bbox": [ + 307, + 578, + 564, + 734 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 307, + 578, + 564, + 734 + ], + "type": "text", + "content": ". Notably, our proposed Nes2Net-X achieves exceptional results, consistently yielding EERs under " + }, + { + "bbox": [ + 307, + 578, + 564, + 734 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 307, + 578, + 564, + 734 + ], + "type": "text", + "content": " across all conditions except C2. This demonstrates the strong stability of Nes2Net-X when handling unseen and relatively simple scenarios. 
In contrast, for neural autoregressive vocoders, all models experience a noticeable drop in performance, with EER reaching up to " + }, + { + "bbox": [ + 307, + 578, + 564, + 734 + ], + "type": "inline_equation", + "content": "5.96\\%" + }, + { + "bbox": [ + 307, + 578, + 564, + 734 + ], + "type": "text", + "content": ". This indicates the greater challenge posed by the sequential and dynamic nature of autoregressive vocoders, which introduce higher variability in synthesis. Nevertheless, Nes2Net-X maintains a clear advantage over the competing models, demonstrating its robustness in handling these complex synthesis conditions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "content": "From the perspective of compression conditions, the differ" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 564, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 564, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 564, + 32 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 126 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 126 + ], + "type": "text", + "content": "ences in model performance are less pronounced compared to the variations observed across vocoder types. Nes2Net-X consistently achieves the lowest EERs across all compression conditions, regardless of the level of distortion introduced by compression. This consistency highlights the model's strong generalization ability across different levels of compressions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 126, + 301, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 126, + 301, + 209 + ], + "spans": [ + { + "bbox": [ + 45, + 126, + 301, + 209 + ], + "type": "text", + "content": "Overall, these findings demonstrate that Nes2Net-X is not only highly effective across diverse vocoder types, but also maintains superior performance under varying compression conditions. This robustness underscores the model's capability to handle both compression diversity and complex synthesis challenges, making it a reliable solution for deepfake audio detection across a wide range of scenarios." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 220, + 220, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 220, + 220, + 231 + ], + "spans": [ + { + "bbox": [ + 45, + 220, + 220, + 231 + ], + "type": "text", + "content": "D. 
The results on the In-the-Wild dataset" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 233, + 300, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 233, + 300, + 262 + ], + "spans": [ + { + "bbox": [ + 45, + 233, + 300, + 262 + ], + "type": "text", + "content": "TABLE VII PERFORMANCE IN EER " + }, + { + "bbox": [ + 45, + 233, + 300, + 262 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 45, + 233, + 300, + 262 + ], + "type": "text", + "content": " ON THE IN-THE-WILD [50] DATASET. OUR RESULT IS REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 48, + 267, + 300, + 415 + ], + "blocks": [ + { + "bbox": [ + 48, + 267, + 300, + 415 + ], + "lines": [ + { + "bbox": [ + 48, + 267, + 300, + 415 + ], + "spans": [ + { + "bbox": [ + 48, + 267, + 300, + 415 + ], + "type": "table", + "html": "
<tr><td>Front-end</td><td>Year</td><td>Back-end</td><td>EER</td></tr>
<tr><td rowspan="14">wav2vec 2.0</td><td>2022</td><td>RawNet &amp; AASIST (reported by [35])</td><td>10.46</td></tr>
<tr><td>2024</td><td>SLIM [14]</td><td>- (12.5)</td></tr>
<tr><td>2024</td><td>MoE [69]</td><td>9.17</td></tr>
<tr><td>2024</td><td>Conformer [63]</td><td>8.42</td></tr>
<tr><td>2024</td><td>TCM [33]</td><td>7.79</td></tr>
<tr><td>2024</td><td>OCKD [70]</td><td>7.68</td></tr>
<tr><td>2024</td><td>SLS [35]</td><td>7.46 (8.87)</td></tr>
<tr><td>2024</td><td>Pascu et al. [74]</td><td>- (7.2)</td></tr>
<tr><td>2025</td><td>Mamba [17]</td><td>6.71</td></tr>
<tr><td>2025</td><td>WaveSpec [72]</td><td>6.58</td></tr>
<tr><td>2025</td><td>LSR+LSA [71]</td><td>5.92</td></tr>
<tr><td>2025</td><td>LSR+LSA [71]※</td><td>5.54</td></tr>
<tr><td>-</td><td>Proposed Nes2Net</td><td>5.80 (7.06)</td></tr>
<tr><td>-</td><td>Proposed Nes2Net-X</td><td>5.52 (6.60)</td></tr>
", + "image_path": "79157a288ac5f97faf334aba645a2a71ff54ca0a6409da7b5db961540f267480.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 71, + 415, + 196, + 423 + ], + "lines": [ + { + "bbox": [ + 71, + 415, + 196, + 423 + ], + "spans": [ + { + "bbox": [ + 71, + 415, + 196, + 423 + ], + "type": "inline_equation", + "content": "\\text{※}" + }, + { + "bbox": [ + 71, + 415, + 196, + 423 + ], + "type": "text", + "content": " with extra data augmentation [71]" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 426, + 300, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 426, + 300, + 497 + ], + "spans": [ + { + "bbox": [ + 45, + 426, + 300, + 497 + ], + "type": "text", + "content": "The In-the-Wild dataset [50] is a collection of deepfake videos sourced from the internet. Unlike controlled datasets, it captures the diverse and unpredictable nature of real-world scenarios. This diversity is essential for developing and evaluating deepfake detection models, as it challenges them to generalize effectively across a wide range of conditions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 498, + 301, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 498, + 301, + 593 + ], + "spans": [ + { + "bbox": [ + 45, + 498, + 301, + 593 + ], + "type": "text", + "content": "In addition, unlike many other datasets that rely on self-generated fake audio, this dataset is collected from publicly available video and audio files explicitly labeled as audio deepfakes [50]. To account for the potential presence of partial spoofing, we evaluate our proposed Nes2Net and Nes2Net-X using the entire duration of each test sample instead of restricting it to the first 4 seconds, as the latter approach risks missing partially spoofed segments." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 593, + 301, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 593, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 45, + 593, + 301, + 700 + ], + "type": "text", + "content": "The testing results, alongside SOTA models, are reported in Table VII. We find that the overall performance trends are consistent with those seen on the ASVspoof 2021 DF dataset. However, EERs on the In-the-Wild dataset are generally higher than those on the DF dataset, reflecting greater complexity and variability in real-world scenarios. Notably, the proposed Nes2Net-X outperforms all SOTA models, achieving the lowest EER of " + }, + { + "bbox": [ + 45, + 593, + 301, + 700 + ], + "type": "inline_equation", + "content": "5.52\\%" + }, + { + "bbox": [ + 45, + 593, + 301, + 700 + ], + "type": "text", + "content": " and a mean EER of " + }, + { + "bbox": [ + 45, + 593, + 301, + 700 + ], + "type": "inline_equation", + "content": "6.60\\%" + }, + { + "bbox": [ + 45, + 593, + 301, + 700 + ], + "type": "text", + "content": " on this challenging dataset." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 709, + 222, + 722 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 709, + 222, + 722 + ], + "spans": [ + { + "bbox": [ + 45, + 709, + 222, + 722 + ], + "type": "text", + "content": "E. 
The results on the ASVspoof 5 dataset" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 724, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 748 + ], + "type": "text", + "content": "The ASVspoof 5 dataset represents the most recent edition in the ASVspoof series. Unlike earlier versions, it introduces" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 312, + 116, + 561, + 186 + ], + "blocks": [ + { + "bbox": [ + 312, + 49, + 560, + 113 + ], + "lines": [ + { + "bbox": [ + 312, + 49, + 560, + 113 + ], + "spans": [ + { + "bbox": [ + 312, + 49, + 560, + 113 + ], + "type": "text", + "content": "TABLE VIII A COMPARISON BETWEEN THE PROPOSED NES2NET AND THE AASIST BASELINE SYSTEM ON THE ASVSPOOF 5 DATASET [49]. 'PARAMS.' AND 'MMACs' REFER TO THE NUMBER OF PARAMETERS AND THE NUMBER OF MILLION MULTIPLY-ACCUMULATE OPERATIONS, RESPECTIVELY. 'AVG.' INDICATES THE AVERAGE RELATIVE PERFORMANCE IMPROVEMENT ACROSS ALL THREE EVALUATION METRICS." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 312, + 116, + 561, + 186 + ], + "lines": [ + { + "bbox": [ + 312, + 116, + 561, + 186 + ], + "spans": [ + { + "bbox": [ + 312, + 116, + 561, + 186 + ], + "type": "table", + "html": "
<tr><td colspan="3">Back-end</td><td colspan="4">Performance</td></tr>
<tr><td>Model</td><td>Params.↓</td><td>MMACs↓</td><td>CLLR↓</td><td>minDCF↓</td><td>EER↓</td><td>Avg.</td></tr>
<tr><td>AASIST</td><td>447k</td><td>707.65</td><td>0.9587</td><td>0.1645</td><td>6.08</td><td>Benchmark</td></tr>
<tr><td>Nes2Net</td><td>511k</td><td>58.11</td><td>0.7912</td><td>0.1568</td><td>6.13</td><td>7.1%</td></tr>
<tr><td>Nes2Net-X</td><td>511k</td><td>91.35</td><td>0.7344</td><td>0.1535</td><td>5.92</td><td>10.9%</td></tr>
", + "image_path": "659129421e45a9fe814172a8f9a34bba84b17527efa124efa6a95150223c258d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 200, + 564, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 200, + 564, + 391 + ], + "spans": [ + { + "bbox": [ + 307, + 200, + 564, + 391 + ], + "type": "text", + "content": "adversarial attacks and is crowdsourced under various acoustic conditions [49]. As it is newly released, there are currently no existing systems available for a fair comparison. Therefore, we re-implement the AASIST system as a baseline and compare it with our proposed Nes2Net and Nes2Net-X model. Following the ASVspoof 5 challenge guidelines [49], we use WavLM [3] as the front-end. Based the evaluation protocol in [37], we assess performance using three metrics: Cost of Log-Likelihood Ratio (CLLR), minimum Detection Cost Function (minDCF), and EER, and present the results in Table VIII. We observe that the Nes2Net and Nes2Net-X backend models result in only a slight increase in the number of parameters compared to AASIST, while significantly reducing MMMs. Moreover, across all three evaluation metrics, the Nes2Net and Nes2Net-X back-ends improve performance by " + }, + { + "bbox": [ + 307, + 200, + 564, + 391 + ], + "type": "inline_equation", + "content": "7.1\\%" + }, + { + "bbox": [ + 307, + 200, + 564, + 391 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 200, + 564, + 391 + ], + "type": "inline_equation", + "content": "10.9\\%" + }, + { + "bbox": [ + 307, + 200, + 564, + 391 + ], + "type": "text", + "content": ", receptively." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 410, + 485, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 410, + 485, + 423 + ], + "spans": [ + { + "bbox": [ + 309, + 410, + 485, + 423 + ], + "type": "text", + "content": "F. The results on the PartialSpoof dataset" + } + ] + } + ], + "index": 16 + }, + { + "type": "table", + "bbox": [ + 316, + 475, + 558, + 576 + ], + "blocks": [ + { + "bbox": [ + 313, + 434, + 559, + 471 + ], + "lines": [ + { + "bbox": [ + 313, + 434, + 559, + 471 + ], + "spans": [ + { + "bbox": [ + 313, + 434, + 559, + 471 + ], + "type": "text", + "content": "TABLE IX PERFORMANCE IN EER " + }, + { + "bbox": [ + 313, + 434, + 559, + 471 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 313, + 434, + 559, + 471 + ], + "type": "text", + "content": " ON THE PARTIALSPOOF [51] DATASET. THE RESULTS ARE REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS. " + }, + { + "bbox": [ + 313, + 434, + 559, + 471 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 313, + 434, + 559, + 471 + ], + "type": "text", + "content": " INDICATES RESULTS OBTAINED FROM OUR IMPLEMENTATION." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 316, + 475, + 558, + 576 + ], + "lines": [ + { + "bbox": [ + 316, + 475, + 558, + 576 + ], + "spans": [ + { + "bbox": [ + 316, + 475, + 558, + 576 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Front-end</td><td rowspan="2">Year</td><td rowspan="2">Back-end</td><td colspan="2">PartialSpoof [51]</td></tr>
<tr><td>Dev</td><td>Eval</td></tr>
<tr><td rowspan="7">wav2vec 2.0</td><td>2024</td><td>gMLP [51]</td><td>0.35</td><td>0.64</td></tr>
<tr><td>-</td><td>gMLP†</td><td>0.39 (0.43)</td><td>0.72 (0.80)</td></tr>
<tr><td>2024</td><td>1D Res2Net [59]</td><td>0.35</td><td>0.73</td></tr>
<tr><td>-</td><td>1D Res2Net†</td><td>0.35 (0.38)</td><td>0.73 (0.79)</td></tr>
<tr><td>-</td><td>SE ResNet†</td><td>0.31 (0.50)</td><td>0.77 (0.78)</td></tr>
<tr><td>-</td><td>Nes2Net</td><td>0.24 (0.36)</td><td>0.53 (0.68)</td></tr>
<tr><td>-</td><td>Nes2Net-X</td><td>0.20 (0.33)</td><td>0.57 (0.64)</td></tr>
", + "image_path": "ff815d3d676fe98054c7f2384cec4ecceab1946ec4936157be8fb0c8b58b53f0.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 592, + 563, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 592, + 563, + 664 + ], + "spans": [ + { + "bbox": [ + 307, + 592, + 563, + 664 + ], + "type": "text", + "content": "Partially manipulating a sentence can significantly alter its intended meaning [59]. When such manipulations occur in small regions, existing models trained on fully spoofed speech and relying on pooling functions struggle to detect these subtle changes. Consequently, there is growing interest in the detection of partially spoofed speech [51], [59], [75]." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 665, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 665, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 665, + 564, + 748 + ], + "type": "text", + "content": "To evaluate the performance of our proposed model across different spoofing tasks, we conduct experiments on the PartialSpoof dataset [51]. The results are presented in Table IX. First, we reproduce the performance of two SOTA models, achieving results comparable to those reported in their original papers [51], [59]. Additionally, we evaluate SE ResNet, which demonstrated performance similar to the other baselines. In" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 64, + 96, + 544, + 162 + ], + "blocks": [ + { + "bbox": [ + 45, + 56, + 564, + 94 + ], + "lines": [ + { + "bbox": [ + 45, + 56, + 564, + 94 + ], + "spans": [ + { + "bbox": [ + 45, + 56, + 564, + 94 + ], + "type": "text", + "content": "TABLE X THE PERFORMANCE IN EER " + }, + { + "bbox": [ + 45, + 56, + 564, + 94 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 45, + 56, + 564, + 94 + ], + "type": "text", + "content": " ON THE ASVspoof 2021 LA, DF [48], AND IN-THE-WILD [50] DATASETS. THE RESULTS ARE REPORTED AS THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS. W/ AUG.' AND W/O AUG.' INDICATE WHETHER EVALUATION WITH AUGMENTATIONS ON THE VALIDATION SET IS USED TO SELECT THE BEST CHECKPOINT FOR TESTING. CKPT Avg. REFERS TO THE NUMBER OF CHECKPOINTS AVERAGED." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 64, + 96, + 544, + 162 + ], + "lines": [ + { + "bbox": [ + 64, + 96, + 544, + 162 + ], + "spans": [ + { + "bbox": [ + 64, + 96, + 544, + 162 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Back-end</td><td rowspan="2">Train Set</td><td rowspan="2">CKPT Avg.</td><td colspan="3">w/ Aug.</td><td colspan="3">w/o Aug.</td></tr>
<tr><td>21LA [48]</td><td>21DF [48]</td><td>In-the-Wild [50]</td><td>21LA [48]</td><td>21DF [48]</td><td>In-the-Wild [50]</td></tr>
<tr><td rowspan="3">Nes2Net-X</td><td rowspan="3">ASVspoof 19 [47]</td><td>N/A</td><td>1.63 (1.79)</td><td>1.84 (2.03)</td><td>5.56 (6.61)</td><td>1.73 (1.95)</td><td>1.65 (1.91)</td><td>5.73 (6.83)</td></tr>
<tr><td>3</td><td>1.70 (1.80)</td><td>1.88 (1.98)</td><td>5.15 (6.31)</td><td>1.66 (1.87)</td><td>1.54 (1.98)</td><td>5.59 (6.90)</td></tr>
<tr><td>5</td><td>1.67 (1.78)</td><td>1.80 (1.91)</td><td>5.28 (6.31)</td><td>1.88 (2.00)</td><td>1.49 (1.78)</td><td>5.52 (6.60)</td></tr>
", + "image_path": "bd43c91baeed1799a4c358e11c48f71336db4a8f12d9b56d330407e1dc303c30.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 175, + 301, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 175, + 301, + 198 + ], + "spans": [ + { + "bbox": [ + 45, + 175, + 301, + 198 + ], + "type": "text", + "content": "contrast, our proposed Nes2Net and Nes2Net-X outperform all three baselines." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 217, + 234, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 217, + 234, + 228 + ], + "spans": [ + { + "bbox": [ + 45, + 217, + 234, + 228 + ], + "type": "text", + "content": "G. Empirical Runtime and Memory Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 232, + 300, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 232, + 300, + 316 + ], + "spans": [ + { + "bbox": [ + 45, + 232, + 300, + 316 + ], + "type": "text", + "content": "Number of parameters and MMACs are widely adopted metrics for evaluating model efficiency. These platform-independent measures offer consistent and fair comparisons across different hardware. However, to better reflect the real-world deployment costs of back-end architectures, we additionally benchmark their training time, inference time, and peak GPU memory usage, as summarized in Table XI." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 47, + 381, + 306, + 510 + ], + "blocks": [ + { + "bbox": [ + 47, + 331, + 299, + 376 + ], + "lines": [ + { + "bbox": [ + 47, + 331, + 299, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 299, + 376 + ], + "type": "text", + "content": "TABLE XI TRAINING AND INFERENCE EFFICIENCY COMPARISON ACROSS BACK-END MODELS. THE TABLE REPORTS THE AVERAGE (AVG.) TRAINING AND INFERENCE TIME PER BATCH IN MILLSECONDS (MS/BATCH), AS WELL AS PEAK GPU MEMORY USAGE IN MEGABYTES (MB)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 381, + 306, + 510 + ], + "lines": [ + { + "bbox": [ + 47, + 381, + 306, + 510 + ], + "spans": [ + { + "bbox": [ + 47, + 381, + 306, + 510 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Back-end</td><td colspan="2">Avg. Time (ms/batch)↓</td><td rowspan="2">Peak GPU Memory↓ (MB)</td></tr>
<tr><td>Training</td><td>Inference</td></tr>
<tr><td>AASIST Light (C=24)</td><td>27.0</td><td>7.8</td><td>1,327</td></tr>
<tr><td>AASIST Standard (C=32)</td><td>53.8</td><td>18.7</td><td>3,454</td></tr>
<tr><td>AASIST Large (C=40)</td><td>79.2</td><td>28.1</td><td>4,273</td></tr>
<tr><td>AASIST XL (C=48)</td><td>86.1</td><td>30.7</td><td>5,087</td></tr>
<tr><td>AASIST XXL (C=56)</td><td>100.9</td><td>37.4</td><td>5,905</td></tr>
<tr><td>ResNet</td><td>7.8</td><td>2.6</td><td>691</td></tr>
<tr><td>Res2Net</td><td>15.6</td><td>3.5</td><td>721</td></tr>
<tr><td>ECAPA-TDNN (C=128)</td><td>9.4</td><td>3.1</td><td>698</td></tr>
<tr><td>Proposed Nes2Net</td><td>20.2</td><td>4.9</td><td>1,312</td></tr>
<tr><td>Proposed Nes2Net-X</td><td>29.1</td><td>9.2</td><td>2,231</td></tr>
", + "image_path": "9d42cb0231914a7b71d6b7e6fa59b6b9bb1fdc4444517d6b37eb83903910150a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 521, + 300, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 521, + 300, + 604 + ], + "spans": [ + { + "bbox": [ + 45, + 521, + 300, + 604 + ], + "type": "text", + "content": "All back-end models are evaluated under identical conditions: input features of 400 frames with 1024 dimensions, a batch size of 64, and execution on a dedicated NVIDIA H20 GPU. The first 10 batches are used for warm-up and excluded from the measurement, and the inference and training times are averaged over the subsequent 200 batches. Training time includes the forward, backward, and optimizer update steps." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 604, + 301, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 604, + 301, + 688 + ], + "spans": [ + { + "bbox": [ + 45, + 604, + 301, + 688 + ], + "type": "text", + "content": "The results show that AASIST models exhibit rapidly increasing runtime and memory consumption as the channel dimension " + }, + { + "bbox": [ + 45, + 604, + 301, + 688 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 45, + 604, + 301, + 688 + ], + "type": "text", + "content": " grows. In contrast, our proposed Nes2Net achieves notably lower latency and memory usage. Nes2Net-X further improves performance in some settings by preserving more high-dimensional information, albeit at the cost of higher resource consumption." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "content": "Conventional models such as ResNet, Res2Net, and ECAPA-TDNN offer faster runtime and smaller memory footprints than our proposed method, but fall short in detection accuracy as shown in earlier experiments. Therefore, when selecting a back-end architecture, we believe both Nes2Net" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 175, + 563, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 563, + 223 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 563, + 223 + ], + "type": "text", + "content": "and Nes2Net-X offer flexible options: the former prioritizes efficiency, while the latter favors accuracy when computational resources permit. This underscores the importance of balancing performance and efficiency in real-world applications." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 233, + 529, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 233, + 529, + 245 + ], + "spans": [ + { + "bbox": [ + 308, + 233, + 529, + 245 + ], + "type": "text", + "content": "H. Should We Use Augmentation During Validation?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 247, + 563, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 247, + 563, + 391 + ], + "spans": [ + { + "bbox": [ + 307, + 247, + 563, + 391 + ], + "type": "text", + "content": "In all previous experiments, the datasets are split into three non-overlapping subsets: training, validation (or development), and test sets. The validation set is used to select the best-performing checkpoints for final evaluation on the test set. 
The training set typically applies data augmentation to enhance model performance and generalization. However, the use of augmentation during validation remains inconsistent across prior studies. For instance, wav2vec 2.0-AASIST [15] applies the same augmentation strategy to both training and validation sets. In contrast, WavLM-AASIST [16] does not use augmentation on the validation set, aligning with common practices in speaker verification research [34], [76], [77]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 392, + 563, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 392, + 563, + 462 + ], + "spans": [ + { + "bbox": [ + 307, + 392, + 563, + 462 + ], + "type": "text", + "content": "In this section, we compare these two approaches and report the results in Table X. We observe that applying the same augmentation to the validation set as in the training set leads to worse performance on ASVspoof 2021 DF [48], but better results on In-the-Wild [50]. When no augmentation is applied to the validation set, the opposite trend is observed." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 462, + 563, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 462, + 563, + 557 + ], + "spans": [ + { + "bbox": [ + 307, + 462, + 563, + 557 + ], + "type": "text", + "content": "From the outcome of the above study, we believe that in cases where robustness to certain variations (e.g., noise, compression, or distortions) is important, applying augmentation during validation provides insights into how well the model handles such conditions. As a result, the selected checkpoints from this approach may generalize better to these variations. Further investigation into this topic may yield deeper insights for future work." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 395, + 567, + 477, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 567, + 477, + 577 + ], + "spans": [ + { + "bbox": [ + 395, + 567, + 477, + 577 + ], + "type": "text", + "content": "VI. CONCLUSION" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 581, + 563, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 581, + 563, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 581, + 563, + 712 + ], + "type": "text", + "content": "In this work, we propose Nested Res2Net (Nes2Net) and its enhanced variant, Nes2Net-X, as lightweight and dimensionality reduction (DR) layer-free back-end architectures designed for speech anti-spoofing in the era of foundation models. Unlike conventional approaches that rely on a DR layer to bridge the mismatch between high-dimensional features and downstream classifiers, our proposed architectures directly process these rich representations. This not only eliminates the computational and parameter overhead introduced by DR layers but also avoids information loss, enhancing overall system efficiency and robustness." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 712, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 712, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 712, + 564, + 750 + ], + "type": "text", + "content": "Nes2Net incorporates a novel nested multi-scale design that enables more effective feature extraction and deeper cross-channel interactions without increasing model complexity." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 91 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 91 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 91 + ], + "type": "text", + "content": "The improved Nes2Net-X further strengthens representation learning by introducing learnable weighted feature fusion, offering adaptive control over the feature aggregation process." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 91, + 301, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 91, + 301, + 186 + ], + "spans": [ + { + "bbox": [ + 45, + 91, + 301, + 186 + ], + "type": "text", + "content": "We conduct extensive evaluations across five representative datasets: CtrSVDD, ASVspoof 2021, ASVspoof 5, Partial-Spoof, and In-the-Wild, covering a wide range of singing voice deepfakes, fully spoofed speech, adversarial attacks, real-world deepfakes, and partially spoofed speech. Across all scenarios, our models achieve SOTA performance, demonstrating superior generalization, compactness, and resilience under unseen and challenging conditions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 186, + 301, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 186, + 301, + 258 + ], + "spans": [ + { + "bbox": [ + 45, + 186, + 301, + 258 + ], + "type": "text", + "content": "In summary, Nes2Net and Nes2Net-X offer a general-purpose, resource-efficient back-end for foundation model-based speech anti-spoofing, providing a practical yet powerful alternative to DR-dependent designs. To facilitate future research and applications, we make all source code and pretrained models publicly available." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 144, + 263, + 203, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 263, + 203, + 274 + ], + "spans": [ + { + "bbox": [ + 144, + 263, + 203, + 274 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 278, + 301, + 753 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 52, + 278, + 299, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 278, + 299, + 315 + ], + "spans": [ + { + "bbox": [ + 52, + 278, + 299, + 315 + ], + "type": "text", + "content": "[1] A. Baevski, Y. Zhou, A. Mohamed, and M. Auli, \"wav2vec 2.0: A framework for self-supervised learning of speech representations,\" in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 33, 2020, pp. 12449-12460." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 316, + 301, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 316, + 301, + 351 + ], + "spans": [ + { + "bbox": [ + 52, + 316, + 301, + 351 + ], + "type": "text", + "content": "[2] W.-N. Hsu, B. Bolte, Y.-H. H. Tsai, K. Lakhotia, R. Salakhutdinov, and A. Mohamed, \"HuBERT: Self-supervised speech representation learning by masked prediction of hidden units,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 29, pp. 3451-3460, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 351, + 301, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 351, + 301, + 396 + ], + "spans": [ + { + "bbox": [ + 51, + 351, + 301, + 396 + ], + "type": "text", + "content": "[3] S. Chen, C. Wang, Z. Chen, Y. Wu, S. Liu, Z. Chen, J. Li, N. Kanda, T. Yoshioka, X. Xiao, J. Wu, L. Zhou, S. Ren, Y. Qian, Y. Qian, J. Wu, M. Zeng, X. Yu, and F. Wei, \"WavLM: Large-scale self-supervised pre-training for full stack speech processing,\" IEEE J. Sel. Top. Signal Process., vol. 16, no. 6, pp. 1505-1518, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 396, + 301, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 396, + 301, + 423 + ], + "spans": [ + { + "bbox": [ + 51, + 396, + 301, + 423 + ], + "type": "text", + "content": "[4] A. T. Liu, S.-W. Li, and H.-y. Lee, “TERA: Self-supervised learning of transformer encoder representation for speech,” IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 29, pp. 2351-2366, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 423, + 301, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 423, + 301, + 450 + ], + "spans": [ + { + "bbox": [ + 51, + 423, + 301, + 450 + ], + "type": "text", + "content": "[5] J. Zhao and W.-Q. Zhang, \"Improving automatic speech recognition performance for low-resource languages with self-supervised models,\" IEEE J. Sel. Top. Signal Process., vol. 16, no. 6, pp. 1227-1241, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 450, + 301, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 450, + 301, + 495 + ], + "spans": [ + { + "bbox": [ + 51, + 450, + 301, + 495 + ], + "type": "text", + "content": "[6] J. weon Jung, W. Zhang, J. Shi, Z. Aldeneh, T. Higuchi, A. Gichamba, B.-J. Theobald, A. Hussen Abdelaziz, and S. Watanabe, \"ESPnet-SPK: full pipeline speaker embedding toolkit with reproducible recipes, self-supervised front-ends, and off-the-shelf models,\" in Proc. INTERSPEECH, 2024, pp. 4278-4282." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 495, + 301, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 495, + 301, + 513 + ], + "spans": [ + { + "bbox": [ + 51, + 495, + 301, + 513 + ], + "type": "text", + "content": "[7] M. Li, Y. Ahmadiadli, and X.-P. Zhang, \"A survey on speech deepfake detection,\" ACM Comput. Surv., vol. 57, no. 7, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 513, + 301, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 513, + 301, + 548 + ], + "spans": [ + { + "bbox": [ + 51, + 513, + 301, + 548 + ], + "type": "text", + "content": "[8] N. M. Müller, P. Kawa, W. H. Choong, E. Casanova, E. Gölle, T. Müller, P. Syga, P. Sperl, and K. Böttinger, \"MLAAD: The multi-language audio anti-spoofing dataset,\" in Proc. Int. Jt. Conf. Neural Netw. 
(IJCNN), 2024, pp. 1-7." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 548, + 301, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 548, + 301, + 584 + ], + "spans": [ + { + "bbox": [ + 51, + 548, + 301, + 584 + ], + "type": "text", + "content": "[9] Y. Xie, Y. Lu, R. Fu, Z. Wen, Z. Wang, J. Tao, X. Qi, X. Wang, Y. Liu, H. Cheng, L. Ye, and Y. Sun, \"The codecfake dataset and countermeasures for the universally detection of deepfake audio,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 33, pp. 386-400, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 584, + 301, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 584, + 301, + 611 + ], + "spans": [ + { + "bbox": [ + 47, + 584, + 301, + 611 + ], + "type": "text", + "content": "[10] R. K. Das, X. Tian, T. Kinnunen, and H. Li, “The attacker's perspective on automatic speaker verification: An overview,” in Proc. INTERSPEECH, 2020, pp. 4213–4217." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 611, + 301, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 611, + 301, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 611, + 301, + 647 + ], + "type": "text", + "content": "[11] J.-w. Jung, Y. Wu, X. Wang, J.-H. Kim, S. Maiti, Y. Matsunaga, H.-j. Shim, J. Tian, N. Evans, J. S. Chung, W. Zhang, S. Um, S. Takamichi, and S. Watanabe, \"SpoofCeleb: Speech deepfake detection and SASV in the wild,\" IEEE Open J. Signal Process., vol. 6, pp. 68-77, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 647, + 301, + 674 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 647, + 301, + 674 + ], + "spans": [ + { + "bbox": [ + 47, + 647, + 301, + 674 + ], + "type": "text", + "content": "[12] J. Du, X. Chen, H. Wu, L. Zhang, I. Lin, I. Chiu, W. Ren, Y. Tseng, Y. Tsao, J.-S. R. Jang et al., \"CodecFake-Omni: A large-scale codec-based deepfake speech dataset,\" arXiv preprint arXiv:2501.08238, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 674, + 301, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 674, + 301, + 692 + ], + "spans": [ + { + "bbox": [ + 47, + 674, + 301, + 692 + ], + "type": "text", + "content": "[13] X. Chen, H. Wu, R. Jang, and H. yi Lee, \"Singing voice graph modeling for singfake detection,\" in Proc. INTERSPEECH, 2024, pp. 4843-4847." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 692, + 301, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 692, + 301, + 718 + ], + "spans": [ + { + "bbox": [ + 47, + 692, + 301, + 718 + ], + "type": "text", + "content": "[14] Y. Zhu, S. Koppisetti, T. Tran, and G. Bharaj, \"SLIM: Style-linguistics mismatch model for generalized audio deepfake detection,\" in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 37, 2024, pp. 67901-67928." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 718, + 301, + 753 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 718, + 301, + 753 + ], + "spans": [ + { + "bbox": [ + 47, + 718, + 301, + 753 + ], + "type": "text", + "content": "[15] H. Tak, M. Todisco, X. Wang, J. weon Jung, J. Yamagishi, and N. Evans, \"Automatic speaker verification spoofing and deepfake detection using wav2vec 2.0 and data augmentation,\" in Proc. Odyssey Speaker Lang. Recognit. Workshop, 2022, pp. 112-119." 
+ } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 56, + 564, + 731 + ], + "type": "list", + "angle": 0, + "index": 44, + "blocks": [ + { + "bbox": [ + 310, + 56, + 564, + 92 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 56, + 564, + 92 + ], + "spans": [ + { + "bbox": [ + 310, + 56, + 564, + 92 + ], + "type": "text", + "content": "[16] A. Guragain, T. Liu, Z. Pan, H. B. Sailor, and Q. Wang, \"Speech foundation model ensembles for the controlled singing voice deepfake detection (CtrSVDD) challenge 2024,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 93, + 564, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 93, + 564, + 119 + ], + "spans": [ + { + "bbox": [ + 310, + 93, + 564, + 119 + ], + "type": "text", + "content": "[17] Y. Xiao and R. K. Das, \"XLSR-Mamba: A dual-column bidirectional state space model for spoofing attack detection,\" IEEE Signal Process Lett., vol. 32, pp. 1276-1280, 2025." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 120, + 564, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 120, + 564, + 147 + ], + "spans": [ + { + "bbox": [ + 310, + 120, + 564, + 147 + ], + "type": "text", + "content": "[18] S.-H. Gao, M.-M. Cheng, K. Zhao, X.-Y. Zhang, M.-H. Yang, and P. Torr, “Res2Net: A new multi-scale backbone architecture,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 43, no. 2, pp. 652-662, 2021." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 147, + 564, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 147, + 564, + 173 + ], + "spans": [ + { + "bbox": [ + 310, + 147, + 564, + 173 + ], + "type": "text", + "content": "[19] K. He, X. Zhang, S. Ren, and J. Sun, \"Deep residual learning for image recognition,\" in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2016, pp. 770-778." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 173, + 564, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 173, + 564, + 201 + ], + "spans": [ + { + "bbox": [ + 310, + 173, + 564, + 201 + ], + "type": "text", + "content": "[20] Y. Chen, S. Zheng, H. Wang, L. Cheng, Q. Chen, and J. Qi, \"An enhanced Res2Net with local and global feature fusion for speaker verification,\" in Proc. INTERSPEECH, 2023, pp. 2228-2232." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 201, + 564, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 201, + 564, + 236 + ], + "spans": [ + { + "bbox": [ + 310, + 201, + 564, + 236 + ], + "type": "text", + "content": "[21] Y. Chen, S. Zheng, H. Wang, L. Cheng, Q. Chen, S. Zhang, and J. Li, \"ERes2NetV2: Boosting short-duration speaker verification performance with computational efficiency,\" in Proc. INTERSPEECH, 2024, pp. 3245-3249." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 236, + 564, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 236, + 564, + 263 + ], + "spans": [ + { + "bbox": [ + 310, + 236, + 564, + 263 + ], + "type": "text", + "content": "[22] T. Liu, K. A. Lee, Q. Wang, and H. Li, \"Golden Gemini is all you need: Finding the sweet spots for speaker verification,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 2324-2337, 2024." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 263, + 564, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 263, + 564, + 290 + ], + "spans": [ + { + "bbox": [ + 310, + 263, + 564, + 290 + ], + "type": "text", + "content": "[23] X. Li, X. Wu, H. Lu, X. Liu, and H. Meng, \"Channel-wise gated Res2Net: Towards robust detection of synthetic speech attacks,\" in Proc. INTERSPEECH, 2021, pp. 4314-4318." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 290, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 290, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 290, + 564, + 308 + ], + "type": "text", + "content": "[24] J. Kim and S. M. Ban, \"Phase-aware spoof speech detection based on Res2Net with phase network,\" in Proc. ICASSP, 2023, pp. 1-5." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 308, + 564, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 344 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 344 + ], + "type": "text", + "content": "[25] T. Liu, I. Kukanov, Z. Pan, Q. Wang, H. B. Sailor, and K. A. Lee, \"Towards quantifying and reducing language mismatch effects in cross-lingual speech anti-spoofing,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 1185-1192." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 344, + 564, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 344, + 564, + 379 + ], + "spans": [ + { + "bbox": [ + 310, + 344, + 564, + 379 + ], + "type": "text", + "content": "[26] J.-w. Jung, H.-S. Heo, H. Tak, H.-j. Shim, J. S. Chung, B.-J. Lee, H.-J. Yu, and N. Evans, \"AASIST: Audio anti-spoofing using integrated spectro-temporal graph attention networks,\" in Proc. ICASSP, 2022, pp. 6367-6371." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 380, + 564, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 380, + 564, + 415 + ], + "spans": [ + { + "bbox": [ + 310, + 380, + 564, + 415 + ], + "type": "text", + "content": "[27] Y. Chen, J. Yi, J. Xue, C. Wang, X. Zhang, S. Dong, S. Zeng, J. Tao, Z. Lv, and C. Fan, \"RawBMamba: End-to-end bidirectional state space model for audio deepfake detection,\" in Proc. INTERSPEECH, 2024, pp. 2720-2724." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 415, + 564, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 415, + 564, + 442 + ], + "spans": [ + { + "bbox": [ + 310, + 415, + 564, + 442 + ], + "type": "text", + "content": "[28] Y. Chen, H. Wu, N. Jiang, X. Xia, Q. Gu, Y. Hao, P. Cai, Y. Guan, J. Wang, W. Xie et al., \"Ustc-kxdigit system description for asvsproof5 challenge,\" arXiv preprint arXiv:2409.01695, 2024." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 442, + 564, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 442, + 564, + 469 + ], + "spans": [ + { + "bbox": [ + 310, + 442, + 564, + 469 + ], + "type": "text", + "content": "[29] Z. Wei, D. Ye, J. Deng, and Y. Lin, “From voices to beats: Enhancing music deepfake detection by identifying forgeries in background,” in Proc. ICASSP, 2025, pp. 1-5." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 469, + 564, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 469, + 564, + 496 + ], + "spans": [ + { + "bbox": [ + 310, + 469, + 564, + 496 + ], + "type": "text", + "content": "[30] Y. Guan, Y. Ai, Z. Li, S. Peng, and W. Guo, \"Recursive feature learning from pre-trained models for spoofing speech detection,\" in Proc. ICASSP, 2025, pp. 1-5." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 496, + 564, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 496, + 564, + 523 + ], + "spans": [ + { + "bbox": [ + 310, + 496, + 564, + 523 + ], + "type": "text", + "content": "[31] Z. Pan, T. Liu, H. B. Sailor, and Q. Wang, \"Attentive merging of hidden embeddings from pre-trained speech model for anti-spoofing detection,\" in Proc. INTERSPEECH, 2024, pp. 2090-2094." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 523, + 564, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 523, + 564, + 558 + ], + "spans": [ + { + "bbox": [ + 310, + 523, + 564, + 558 + ], + "type": "text", + "content": "[32] M. Huaifah, T. Liu, H. B. Sailor, K. M. Tan, T. K. Vangani, Q. Wang, J. H. Wong, N. F. Chen, and A. T. Aw, \"Towards a speech foundation model for Singapore and beyond,\" arXiv preprint arXiv:2412.11538, 2024." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 559, + 564, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 559, + 564, + 594 + ], + "spans": [ + { + "bbox": [ + 310, + 559, + 564, + 594 + ], + "type": "text", + "content": "[33] D.-T. Truong, R. Tao, T. Nguyen, H.-T. Luong, K. A. Lee, and E. S. Chng, “Temporal-channel modeling in multi-head self-attention for synthetic speech detection,” in Proc. INTERSPEECH, 2024, pp. 537–541." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 594, + 564, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 594, + 564, + 630 + ], + "spans": [ + { + "bbox": [ + 310, + 594, + 564, + 630 + ], + "type": "text", + "content": "[34] B. Desplanques, J. Thienpondt, and K. Demuynck, \"ECAPA-TDNN: Emphasized channel attention, propagation and aggregation in TDNN based speaker verification,\" in Proc. INTERSPEECH, 2020, pp. 3830-3834." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 631, + 564, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 631, + 564, + 658 + ], + "spans": [ + { + "bbox": [ + 310, + 631, + 564, + 658 + ], + "type": "text", + "content": "[35] Q. Zhang, S. Wen, and T. Hu, \"Audio deepfake detection with self-supervised XLS-R and SLS classifier,\" in Proc. ACM Int. Conf. Multimedia, 2024, pp. 6765-6773." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 658, + 564, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 658, + 564, + 685 + ], + "spans": [ + { + "bbox": [ + 310, + 658, + 564, + 685 + ], + "type": "text", + "content": "[36] Z. Ge, X. Xu, H. Guo, Z. Yang, and B. Schuller, \"Gncl: A graph neural network with consistency loss for segment-level spoofed speech detection,\" in Proc. ICASSP, 2025, pp. 1-5." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 685, + 564, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 685, + 564, + 731 + ], + "spans": [ + { + "bbox": [ + 310, + 685, + 564, + 731 + ], + "type": "text", + "content": "[37] X. 
Wang, H. Delgado, H. Tak, J. weon Jung, H. jin Shim, M. Todisco, I. Kukanov, X. Liu, M. Sahidullah, T. H. Kinnunen, N. Evans, K. A. Lee, and J. Yamagishi, \"ASVspoof 5: crowdsourced speech data, deepfakes, and adversarial attacks at scale,\" in Autom. Speaker Verif. Spoofing Countermeas. Workshop, 2024, pp. 1-8." + } + ] + } + ], + "index": 43 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 56, + 301, + 657 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 47, + 56, + 301, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 56, + 301, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 56, + 301, + 83 + ], + "type": "text", + "content": "[38] Y. Zhang, Y. Zang, J. Shi, R. Yamamoto, T. Toda, and Z. Duan, \"SVDD 2024: The inaugural singing voice deepfake detection challenge,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 782-787." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 84, + 301, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 84, + 301, + 119 + ], + "spans": [ + { + "bbox": [ + 47, + 84, + 301, + 119 + ], + "type": "text", + "content": "[39] Q. Zhang, S. Wen, F. Yan, T. Hu, and J. Li, \"XWSB: A blend system utilizing XLS-R and WavLM with SLS classifier detection system for SVDD 2024 challenge,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 788-794." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 120, + 301, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 120, + 301, + 147 + ], + "spans": [ + { + "bbox": [ + 47, + 120, + 301, + 147 + ], + "type": "text", + "content": "[40] J. Yi, J. Tao, R. Fu, X. Yan, C. Wang, T. Wang, C. Y. Zhang, X. Zhang, Y. Zhao, Y. Ren et al., \"ADD 2023: the second audio deepfake detection challenge,\" arXiv preprint arXiv:2305.13774, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 147, + 301, + 164 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 147, + 301, + 164 + ], + "spans": [ + { + "bbox": [ + 47, + 147, + 301, + 164 + ], + "type": "text", + "content": "[41] J. Hu, L. Shen, and G. Sun, \"Squeeze-and-excitation networks,\" in Proc. IEEE Conf. Comput. Vis. Pattern Recog. (CVPR), 2018." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 165, + 301, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 165, + 301, + 191 + ], + "spans": [ + { + "bbox": [ + 47, + 165, + 301, + 191 + ], + "type": "text", + "content": "[42] K. Okabe, T. Koshinaka, and K. Shinoda, \"Attentive statistics pooling for deep speaker embedding,\" in Proc. INTERSPEECH, 2018, pp. 2252-2256." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 192, + 301, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 192, + 301, + 218 + ], + "spans": [ + { + "bbox": [ + 47, + 192, + 301, + 218 + ], + "type": "text", + "content": "[43] T. Zhou, Y. Zhao, and J. Wu, \"ResNeXt and Res2Net structures for speaker verification,\" in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2021, pp. 301-307." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 219, + 301, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 219, + 301, + 245 + ], + "spans": [ + { + "bbox": [ + 47, + 219, + 301, + 245 + ], + "type": "text", + "content": "[44] Q. Wang, B. Wu, P. Zhu, P. Li, W. Zuo, and Q. Hu, \"ECA-Net: Efficient channel attention for deep convolutional neural networks,\" in Proc. IEEE Conf. Comput. Vis. Pattern Recog. (CVPR), 2020, pp. 11531-11539." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 246, + 301, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 246, + 301, + 272 + ], + "spans": [ + { + "bbox": [ + 47, + 246, + 301, + 272 + ], + "type": "text", + "content": "[45] T. Liu, R. K. Das, K. A. Lee, and H. Li, \"MFA: TDNN with multi-scale frequency-channel attention for text-independent speaker verification with short utterances,\" in Proc. ICASSP, 2022, pp. 7517-7521." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 273, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 273, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 273, + 301, + 308 + ], + "type": "text", + "content": "[46] Y. Zang, J. Shi, Y. Zhang, R. Yamamoto, J. Han, Y. Tang, S. Xu, W. Zhao, J. Guo, T. Toda, and Z. Duan, \"CtrSVDD: A benchmark dataset and baseline analysis for controlled singing voice deepfake detection,\" in Proc. INTERSPEECH, 2024, pp. 4783-4787." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 308, + 301, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 397 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 397 + ], + "type": "text", + "content": "[47] X. Wang, J. Yamagishi, M. Todisco, H. Delgado, A. Nautsch, N. Evans, M. Sahidullah, V. Vestman, T. Kinnunen, K. A. Lee, L. Juvela, P. Alku, Y.-H. Peng, H.-T. Hwang, Y. Tsao, H.-M. Wang, S. L. Maguer, M. Becker, F. Henderson, R. Clark, Y. Zhang, Q. Wang, Y. Jia, K. Onuma, K. Mushika, T. Kaneda, Y. Jiang, L.-J. Liu, Y.-C. Wu, W.-C. Huang, T. Toda, K. Tanaka, H. Kameoka, I. Steiner, D. Matrouf, J.-F. Bonastre, A. Govender, S. Ronanki, J.-X. Zhang, and Z.-H. Ling, \"ASVspoof 2019: A large-scale public database of synthesized, converted and replayed speech,\" Comput. Speech Lang., vol. 64, p. 101114, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 398, + 301, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 398, + 301, + 442 + ], + "spans": [ + { + "bbox": [ + 47, + 398, + 301, + 442 + ], + "type": "text", + "content": "[48] J. Yamagishi, X. Wang, M. Todisco, M. Sahidullah, J. Patino, A. Nautsch, X. Liu, K. A. Lee, T. Kinnunen, N. Evans, and H. Delgado, \"ASVspoof 2021: accelerating progress in spoofed and deepfake speech detection,\" in Autom. Speaker Verif. Spoofing Countermeas. Challenge, 2021, pp. 47-54." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 443, + 301, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 443, + 301, + 487 + ], + "spans": [ + { + "bbox": [ + 47, + 443, + 301, + 487 + ], + "type": "text", + "content": "[49] X. Wang, H. Delgado, H. Tak, J.-w. Jung, H.-j. Shim, M. Todisco, I. Kukanov, X. Liu, M. Sahidullah, T. Kinnunen et al., \"ASVspoof 5: Design, collection and validation of resources for spoofing, deepfake, and adversarial attack detection using crowdsourced speech,\" arXiv preprint arXiv:2502.08857, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 488, + 301, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 488, + 301, + 514 + ], + "spans": [ + { + "bbox": [ + 47, + 488, + 301, + 514 + ], + "type": "text", + "content": "[50] N. M. Müller, P. Czempin, F. Dieckmann, A. Froghyar, and K. Böttinger, \"Does audio deepfake detection generalize?\" in Proc. INTERSPEECH, 2022, pp. 2783-2787." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 514, + 301, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 301, + 550 + ], + "type": "text", + "content": "[51] L. Zhang, X. Wang, E. Cooper, N. Evans, and J. Yamagishi, \"The PartialProof database and countermeasures for the detection of short fake speech segments embedded in an utterance,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 31, pp. 813-825, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 551, + 301, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 551, + 301, + 568 + ], + "spans": [ + { + "bbox": [ + 47, + 551, + 301, + 568 + ], + "type": "text", + "content": "[52] Y. Zang, Y. Zhang, M. Heydari, and Z. Duan, \"SingFake: Singing voice deepfake detection,\" in Proc. ICASSP, 2024, pp. 12156-12160." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 569, + 301, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 569, + 301, + 594 + ], + "spans": [ + { + "bbox": [ + 47, + 569, + 301, + 594 + ], + "type": "text", + "content": "[53] Y. Xie, J. Zhou, X. Lu, Z. Jiang, Y. Yang, H. Cheng, and L. Ye, \"FSD: An initial chinese dataset for fake song detection,\" in Proc. ICASSP, 2024, pp. 4605-4609." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 595, + 301, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 595, + 301, + 622 + ], + "spans": [ + { + "bbox": [ + 47, + 595, + 301, + 622 + ], + "type": "text", + "content": "[54] T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar, “Focal loss for dense object detection,” in IEEE Int. Conf. Comput. Vis. (ICCV), 2017, pp. 2980–2988." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 622, + 301, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 622, + 301, + 657 + ], + "spans": [ + { + "bbox": [ + 47, + 622, + 301, + 657 + ], + "type": "text", + "content": "[55] H. Tak, M. Kamble, J. Patino, M. Todisco, and N. Evans, \"Rawboost: A raw data boosting and augmentation method applied to automatic speaker verification anti-spoofing,\" in Proc. ICASSP, 2022, pp. 6382-6386." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 56, + 563, + 656 + ], + "type": "list", + "angle": 0, + "index": 43, + "blocks": [ + { + "bbox": [ + 310, + 56, + 563, + 75 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 56, + 563, + 75 + ], + "spans": [ + { + "bbox": [ + 310, + 56, + 563, + 75 + ], + "type": "text", + "content": "[56] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” in Int. Conf. Learn. Represent., 2015." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 76, + 563, + 92 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 76, + 563, + 92 + ], + "spans": [ + { + "bbox": [ + 310, + 76, + 563, + 92 + ], + "type": "text", + "content": "[57] D. Snyder, G. Chen, and D. Povey, “Musan: A music, speech, and noise corpus,” arXiv preprint arXiv:1510.08484, 2015." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 93, + 563, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 93, + 563, + 128 + ], + "spans": [ + { + "bbox": [ + 310, + 93, + 563, + 128 + ], + "type": "text", + "content": "[58] T. Ko, V. Peddinti, D. Povey, M. L. Seltzer, and S. Khudanpur, “A study on data augmentation of reverberant speech for robust speech recognition,” in 2017 IEEE international conference on acoustics, speech and signal processing (ICASSP). IEEE, 2017, pp. 5220–5224." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 128, + 563, + 154 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 128, + 563, + 154 + ], + "spans": [ + { + "bbox": [ + 310, + 128, + 563, + 154 + ], + "type": "text", + "content": "[59] T. Liu, L. Zhang, R. K. Das, Y. Ma, R. Tao, and H. Li, \"How do neural spoofing countermeasures detect partially spoofed audio?\" in Proc. INTERSPEECH, 2024, pp. 1105-1109." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 155, + 563, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 155, + 563, + 180 + ], + "spans": [ + { + "bbox": [ + 310, + 155, + 563, + 180 + ], + "type": "text", + "content": "[60] X. Wang and J. Yamagishi, “A comparative study on recent neural spoofing countermeasures for synthetic speech detection,” in Proc. INTERSPEECH, 2021, pp. 4259–4263." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 181, + 563, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 181, + 563, + 207 + ], + "spans": [ + { + "bbox": [ + 310, + 181, + 563, + 207 + ], + "type": "text", + "content": "[61] J. M. Martin-Doñas and A. Álvarez, “The Vicomtech audio deepfake detection system based on wav2vec2 for the 2022 ADD challenge,” in Proc. ICASSP, 2022, pp. 9241–9245." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 208, + 563, + 234 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 208, + 563, + 234 + ], + "spans": [ + { + "bbox": [ + 310, + 208, + 563, + 234 + ], + "type": "text", + "content": "[62] X. Wang and J. Yamagishi, “Investigating self-supervised front ends for speech spoofing countermeasures,” in Proc. Odyssey Speaker Lang. Recognit. Workshop, 2022, pp. 100–106." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 235, + 563, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 235, + 563, + 261 + ], + "spans": [ + { + "bbox": [ + 310, + 235, + 563, + 261 + ], + "type": "text", + "content": "[63] E. Rosello, A. 
Gomez-Alanis, A. M. Gomez, and A. Peinado, “A conformer-based classifier for variable-length utterance processing in anti-spoofing,” in Proc. INTERSPEECH, 2023, pp. 5281-5285." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 262, + 563, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 262, + 563, + 297 + ], + "spans": [ + { + "bbox": [ + 310, + 262, + 563, + 297 + ], + "type": "text", + "content": "[64] E. Rosello, A. M. Gomez, I. López-Espejo, A. M. Peinado, and J. M. Martín-Doñas, “Anti-spoofing ensembling model: Dynamic weight allocation in ensemble models for improved voice biometrics security,” in Proc. INTERSPEECH, 2024, pp. 497–501." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 298, + 563, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 298, + 563, + 324 + ], + "spans": [ + { + "bbox": [ + 310, + 298, + 563, + 324 + ], + "type": "text", + "content": "[65] H. M. Tran, D. Guennec, P. Martin, A. Sini, D. Loline, A. Delhay, and P.-F. Marteau, \"Spoofed speech detection with a focus on speaker embedding,\" in Proc. INTERSPEECH, 2024, pp. 2080-2084." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 324, + 563, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 324, + 563, + 360 + ], + "spans": [ + { + "bbox": [ + 310, + 324, + 563, + 360 + ], + "type": "text", + "content": "[66] B. Wang, Y. Tang, F. Wei, Z. Ba, and K. Ren, \"FTDKD: Frequency-time domain knowledge distillation for low-quality compressed audio deepfake detection,\" IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 4905-4918, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 361, + 563, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 361, + 563, + 387 + ], + "spans": [ + { + "bbox": [ + 310, + 361, + 563, + 387 + ], + "type": "text", + "content": "[67] Y. Zhang, J. Lu, Z. Shang, W. Wang, and P. Zhang, “Improving short utterance anti-spoofing with AASIST2,” in Proc. ICASSP, 2024, pp. 11636-11640." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 388, + 563, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 388, + 563, + 414 + ], + "spans": [ + { + "bbox": [ + 310, + 388, + 563, + 414 + ], + "type": "text", + "content": "[68] Y. Guo, H. Huang, X. Chen, H. Zhao, and Y. Wang, \"Audio deepfake detection with self-supervised WavLm and multi-fusion attentive classifier,\" in Proc. ICASSP, 2024, pp. 12702-12706." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 415, + 563, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 415, + 563, + 441 + ], + "spans": [ + { + "bbox": [ + 310, + 415, + 563, + 441 + ], + "type": "text", + "content": "[69] Z. Wang, R. Fu, Z. Wen, J. Tao, X. Wang, Y. Xie, X. Qi, S. Shi, Y. Lu, Y. Liu et al., \"Mixture of experts fusion for fake audio detection using frozen wav2vec 2.0,\" arXiv preprint arXiv:2409.11909, 2024." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 441, + 563, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 441, + 563, + 468 + ], + "spans": [ + { + "bbox": [ + 310, + 441, + 563, + 468 + ], + "type": "text", + "content": "[70] J. Lu, Y. Zhang, W. Wang, Z. Shang, and P. Zhang, “One-class knowledge distillation for spoofing speech detection,” in Proc. ICASSP, 2024, pp. 11251-11255." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 468, + 563, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 468, + 563, + 495 + ], + "spans": [ + { + "bbox": [ + 310, + 468, + 563, + 495 + ], + "type": "text", + "content": "[71] W. Huang, Y. Gu, Z. Wang, H. Zhu, and Y. Qian, \"Generalizable audio deepfake detection via latent space refinement and augmentation,\" in Proc. ICASSP, 2025, pp. 1-5." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 495, + 563, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 495, + 563, + 513 + ], + "spans": [ + { + "bbox": [ + 310, + 495, + 563, + 513 + ], + "type": "text", + "content": "[72] Z. Jin, L. Lang, and B. Leng, \"Wave-spectrogram cross-modal aggregation for audio deepfake detection,\" in Proc. ICASSP, 2025, pp. 1-5." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 514, + 563, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 514, + 563, + 539 + ], + "spans": [ + { + "bbox": [ + 310, + 514, + 563, + 539 + ], + "type": "text", + "content": "[73] C. Y. Kwok, D.-T. Truong, and J. Q. Yip, \"Robust audio deepfake detection using ensemble confidence calibration,\" in Proc. ICASSP, 2025, pp. 1-5." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 540, + 563, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 540, + 563, + 566 + ], + "spans": [ + { + "bbox": [ + 310, + 540, + 563, + 566 + ], + "type": "text", + "content": "[74] O. Pascu, A. Stan, D. Oneata, E. Oneata, and H. Cucu, \"Towards generalisable and calibrated audio deepfake detection with self-supervised representations,\" in Proc. INTERSPEECH, 2024, pp. 4828-4832." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 567, + 563, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 567, + 563, + 594 + ], + "spans": [ + { + "bbox": [ + 310, + 567, + 563, + 594 + ], + "type": "text", + "content": "[75] H.-T. Luong, H. Li, L. Zhang, K. A. Lee, and E. S. Chng, “LlamaPartial-Spoof: An LLM-driven fake speech dataset simulating disinformation generation,” arXiv preprint arXiv:2409.14743, 2024." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 594, + 563, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 594, + 563, + 620 + ], + "spans": [ + { + "bbox": [ + 310, + 594, + 563, + 620 + ], + "type": "text", + "content": "[76] T. Liu, K. A. Lee, Q. Wang, and H. Li, \"Disentangling voice and content with self-supervision for speaker recognition,\" Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 36, pp. 50221-50236, 2023." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 620, + 563, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 620, + 563, + 656 + ], + "spans": [ + { + "bbox": [ + 310, + 620, + 563, + 656 + ], + "type": "text", + "content": "[77] S. Wang, Z. Chen, K. A. Lee, Y. Qian, and H. Li, “Overview of speaker modeling and its applications: From the lens of deep speaker representation learning,” IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 4971–4998, 2024." 
+ } + ] + } + ], + "index": 42 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 277, + 33 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_content_list.json b/data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..99f89979996ae730ad44389f9411fdbc4fd8bebe --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_content_list.json @@ -0,0 +1,1546 @@ +[ + { + "type": "text", + "text": "POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction", + "text_level": 1, + "bbox": [ + 166, + 128, + 831, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Songyan Zhang $^{1*}$ Yongtao Ge $^{2,3*}$ Jinyuan Tian $^{2*}$ Guangkai Xu $^{2}$ Hao Chen $^{2\\boxtimes}$ Chen Lv $^{1}$ Chunhua Shen $^{2}$", + "bbox": [ + 214, + 200, + 776, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1Nanyang Technological University, Singapore 2Zhejiang University, China 3The University of Adelaide, Australia", + "bbox": [ + 186, + 244, + 812, + 282 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2038d457d38fd0c221b12bdad5e2a377292a6a5391edc6521c9a1a933405292f.jpg", + "image_caption": [ + "Figure 1. 3D reconstruction from an arbitrary dynamic video with POMATO. Without relying on external modules, POMATO can directly perform 3D reconstruction along with temporal 3D point tracking and dynamic mask estimation." + ], + "image_footnote": [], + "bbox": [ + 94, + 320, + 199, + 579 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7ef319305d817284a367ded55f397489a47ecfddf523efa5658896a2d17354bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 205, + 324, + 609, + 579 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/17b51d4a6a4dec36c51e858d07bf3a17155c0a4b6326adbd345b3eed1d1b4ef0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 325, + 893, + 575 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 633, + 326, + 648 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent approaches to 3D reconstruction in dynamic scenes primarily rely on the integration of separate geometry estimation and matching modules, where the latter plays a critical role in distinguishing dynamic regions and mitigating the interference caused by moving objects. Furthermore, the matching module explicitly models object motion, enabling the tracking of specific targets and advancing motion understanding in complex scenarios. 
Recently, the proposed representation of pointmap in DUSt3R suggests a potential solution to unify both geometry estimation and matching in 3D space, effectively reducing computational overhead by eliminating the need for redundant auxiliary modules. However, it still struggles with ambiguous correspondences in dynamic regions, which limits reconstruction", + "bbox": [ + 88, + 666, + 482, + 876 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "performance in such scenarios. In this work, we present POMATO, a unified framework for dynamic 3D reconstruction by marrying POintmap MAtching with Temporal mOtion. Specifically, our method first learns an explicit matching relationship by mapping RGB pixels across different views to 3D pointmaps within a unified coordinate system. Furthermore, we introduce a temporal motion module for dynamic motions that ensures scale consistency across different frames and enhances performance in 3D reconstruction tasks requiring both precise geometry and reliable matching, most notably 3D point tracking. We show the effectiveness of our proposed POMATO by demonstrating the remarkable performance across multiple downstream tasks, including video depth estimation, 3D point tracking, and pose estimation. Code and models are publicly available at https://github.com/wyddmw/POMATO.", + "bbox": [ + 511, + 636, + 906, + 876 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05692v2 [eess.IV] 8 Aug 2025", + "bbox": [ + 22, + 273, + 60, + 722 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contribution. $\\boxdot$ Corresponding author.", + "bbox": [ + 114, + 886, + 362, + 898 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a572bf8f3f31147ee0b7fe7ce8470f549bc4ee48d2b9a7606bf4cb6e04f57753.jpg", + "image_caption": [ + "Image1" + ], + "image_footnote": [], + "bbox": [ + 165, + 93, + 392, + 227 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/865c5f7f18ca194c59889f3437641b2c340f0dae47f7ba5904d226f77a02757d.jpg", + "image_caption": [ + "Figure 2. Ambiguity in 3D point matching in dynamic scenes with DUSt3R. Given representative corresponding pixels of background (orange) and moving foreground (red) in two different views, DUSt3R outputs a pair of 3D points within the same coordinate system. In static regions, identical pixels share the same 3D coordinates which provide an accurate matching relationship in 3D space, but in moving regions, the 3D coordinates are inconsistent for corresponding pixels across views, leading to ambiguous 3D matching relationships." + ], + "image_footnote": [], + "bbox": [ + 406, + 89, + 759, + 237 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/22f3ec6d130b32d236544f7a36f8d34c068ee43491968d8b8252afd707b98ed1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 761, + 93, + 898, + 227 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 89, + 316, + 220, + 330 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Image-based 3D reconstruction is a fundamental task in computer vision with a wide range of applications including SLAM [39], robotics [19, 49], autonomous driving [53], and novel view synthesis [5]. 
While substantial progress has been achieved in static 3D reconstruction [16, 23, 26, 44, 51], dynamic scenes remain a major hurdle due to complexities like non-rigid motion and deformation, which may hamper the learning of local structure and camera motion, thereby complicating accurate 3D reconstruction. These scenarios require explicit modeling of both scene geometry and object motion. Moreover, downstream reconstruction tasks, such as 3D point tracking, demand precise geometry estimation and robust matching across views. To effectively distinguish dynamic regions, it is essential to establish reliable correspondences between different frames. Some pioneering works have attempted to address dynamic motion by incorporating additional auxiliary matching modules, such as optical flow [42, 52] or 2D tracking [47]. However, these approaches may suffer from domain gaps and accumulated errors between modules, limiting their effectiveness. A unified framework that seamlessly integrates geometry estimation and matching for dynamic 3D reconstruction remains a critical and underexplored challenge.", "bbox": [ 91, 347, 483, 709 ], "page_idx": 1 }, { "type": "text", "text": "Recently, DUSt3R [44] proposes a promising solution to address this challenge. It introduces the concept of a pointmap that assigns each pixel in an image to a corresponding 3D coordinate. The network utilizes a standard transformer-based encoder-decoder architecture and receives a pair of images as input. The system incorporates two parallel decoders to predict pointmaps for each view within the same coordinate system. However, this representation is limited to static matching and struggles in dynamic scenes, as illustrated in Fig. 2.", "bbox": [ 89, 714, 482, 864 ], "page_idx": 1 }, { "type": "text", "text": "To address this problem, we present POMATO, a unified network for dynamic 3D reconstruction by marrying", "bbox": [ 89, 869, 482, 901 ], "page_idx": 1 }, { "type": "text", "text": "POintmap MAtching with Temporal mOtion. We argue that with iterative cross-attention modules across different views, matching features are well preserved in the decoder tokens. We thus introduce an auxiliary pointmap matching head to learn explicit correspondences. Specifically, for each pixel in the second view, the pointmap matching head predicts the corresponding 3D coordinates of its counterpart in the first view, under the shared coordinate system. Our proposed pointmap-based matching representation enables the establishment of explicit correspondences in 3D space, which can be directly leveraged for motion analysis, especially the estimation of dynamic regions. Moreover, we further extend our POMATO to handle 4D video sequences by introducing a temporal motion module that enhances the learning of temporal motions. This motion module promotes scale consistency across different frames and improves performance in tasks where both accurate geometry and reliable matching are paramount, most notably 3D point tracking. Compared with recent temporal 3D reconstruction methods [41, 43] that follow an autoregressive scheme, in which previous frames are hidden from newly added frames, our temporal motion module is based on a self-attention mechanism along the temporal dimension, facilitating comprehensive interaction across all frames. Our POMATO is trained in a two-stage manner. 
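Before detailing the two training stages, here is a minimal sketch of self-attention applied along the temporal axis, where every frame can attend to every other frame, as described above (in contrast to autoregressive schemes). The tensor layout, layer choices, and class name are illustrative assumptions, not the module's actual design.

```python
import torch
import torch.nn as nn

class TemporalSelfAttention(nn.Module):
    """Illustrative sketch: full (non-causal) self-attention over frames."""
    def __init__(self, dim, num_heads=8):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)

    def forward(self, tokens):
        # tokens: (B, T, N, C) = batch, frames, patch tokens per frame, channels
        B, T, N, C = tokens.shape
        # Fold patch tokens into the batch so attention runs over the T frames.
        x = tokens.permute(0, 2, 1, 3).reshape(B * N, T, C)
        h = self.norm(x)
        y, _ = self.attn(h, h, h)   # every frame attends to all frames
        x = x + y                   # residual connection
        return x.reshape(B, N, T, C).permute(0, 2, 1, 3)
```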
In the first stage, we use pairwise input images to learn fundamental geometry and matching capabilities. In the second stage, we extend the input to sequential video frames and incorporate the temporal motion module, enabling the model to effectively capture motions over time.", "bbox": [ 511, 316, 906, 770 ], "page_idx": 1 }, { "type": "text", "text": "Our contributions are threefold: First, we propose a novel approach that unifies fundamental geometry estimation and motion understanding for dynamic 3D reconstruction into a single network by incorporating the representation of pointmap matching. Second, we introduce a temporal motion module to facilitate the interactions of motion features along the temporal dimension, which significantly improves the performance in tasks", "bbox": [ 511, 779, 908, 901 ], "page_idx": 1 }, { "type": "text", "text": "where both accurate geometry and precise matching are required for video sequential input, most notably 3D point tracking. Third, we demonstrate promising performance on 3D vision tasks, including video depth estimation, 3D point tracking, and camera pose estimation.", "bbox": [ 89, 90, 480, 167 ], "page_idx": 2 }, { "type": "text", "text": "2. Related Work", "text_level": 1, "bbox": [ 89, 180, 232, 196 ], "page_idx": 2 }, { "type": "text", "text": "Geometry estimation refers to the process of determining the spatial properties and structures from different forms of visual data. Direct recovery of 3D geometry from a single RGB image is by nature an ill-posed problem. Many recent works [3, 16, 23, 51] have tried to leverage strong pre-trained models to learn generalizable depthmaps from large-scale real and synthetic datasets to resolve these ambiguities. For example, Marigold [23], Geowizard [11], and GenPercept [48] aim at leveraging the generative priors of pre-trained diffusion models by finetuning them on synthetic datasets. Depth Anything V2 [51] proposes to estimate scale-and-shift-invariant disparity maps by finetuning the DINOv2 [29] model on synthetic datasets and large-scale pseudo labels. Depth Pro [3] further proposes an FOV head to estimate the metric depthmap from a single image without relying on camera intrinsics as input. Due to the scale ambiguity of monocular depth estimation models, ChronoDepth [36], DepthCrafter [17], and Depth-any-video [50] propose to learn temporally consistent depthmaps by leveraging the priors of a video generative model, i.e., SVD [2]. In another line of research, multi-view stereo reconstruction (MVS) methods seek to reconstruct visible surfaces from multiple viewpoints. Traditional MVS [12] and SfM pipelines break the reconstruction task into several sub-problems, e.g., feature extraction [8], image matching [1, 27], triangulation, and bundle adjustment [7]. The chain is complicated and accumulates noise at every step, often resulting in unsatisfactory performance in complex real-world scenes. Recognizing the limitations of previous MVS methods, the seminal work DUSt3R [44] proposes the 3D pointmap representation and trains a network on large-scale data to regress dense and accurate pointmaps from a pair of images. The camera intrinsics and relative camera poses can be implicitly inferred from the two-view pointmaps. However, it still cannot handle reconstruction for dynamic scenes. 
MonST3R [52] directly finetunes the original DUSt3R model on synthetic datasets that contain dynamic scenes. Motion representation. Optical flow is a commonly used representation for 2D motion. RAFT [38] is a representative work for pairwise optical flow estimation, which employs a 4D cost volume and recurrently estimates the optical flow. Some follow-up methods further extend it to multi-frame (3-5 frames) settings, which remain insufficient for long-range tracking. To resolve this problem, Particle Video [35] represents video motion using a set of particles. Each", "bbox": [ 89, 205, 483, 902 ], "page_idx": 2 }, { "type": "text", "text": "particle is an image point sample with a long-duration trajectory and other properties. Particle videos have two key advantages over optical flow: (1) persistence through occlusions, and (2) multi-frame temporal context. Some recent works, PIPs [15], TAPIR [9], and CoTracker [22], have renewed interest in this representation and show promising long-term 2D point tracking results. Recognizing the advantage of the point representation, SpatialTracker [47] lifts the 2D points into 3D and performs tracking in 3D space. Though it can handle occlusions and enhance 3D tracking accuracy, it still relies on a separate monocular depth estimator, which prevents it from performing 3D point tracking in an end-to-end fashion.", "bbox": [ 511, 90, 903, 286 ], "page_idx": 2 }, { "type": "text", "text": "Multi-view dynamic reconstruction. Our work is closely connected to multi-view dynamic 3D reconstruction techniques. Early works [32, 34] adopt a straightforward strategy: they first pre-segment the scene into different regions, each corresponding to a single rigid part of an object, and then apply the rigid-SfM technique to each region. Some of the recent Neural Radiance Fields (NeRF) [28] and Gaussian Splatting [24] based methods have achieved state-of-the-art results. However, most of these methods require simultaneous multi-view video inputs or predefined templates [18]. Shape of Motion [42] proposes a new representation that models the dynamic scene as a set of persistent 3D Gaussians, and optimizes this representation from a monocular video by leveraging monocular depth estimation priors and 2D track estimates across frames.", "bbox": [ 511, 287, 906, 513 ], "page_idx": 2 }, { "type": "text", "text": "3. Method", "text_level": 1, "bbox": [ 513, 526, 604, 542 ], "page_idx": 2 }, { "type": "text", "text": "3.1. Preliminary", "text_level": 1, "bbox": [ 511, 551, 643, 566 ], "page_idx": 2 }, { "type": "text", "text": "The overview of our POMATO is demonstrated in Fig. 3. We adopt the definition of pointmap $\mathbf{X} \in \mathbb{R}^{H \times W \times 3}$ in DUSt3R [44] as a dense 2D field of 3D points where each point corresponds to its respective RGB pixel. Given a pair of input images $\mathbf{I}^1, \mathbf{I}^2 \in \mathbb{R}^{H \times W \times 3}$ from two different views, a weight-sharing ViT first extracts the corresponding features $\mathbf{F}^1, \mathbf{F}^2$ for each view. 
Two parallel branches are employed to decode the geometric structures and enhance the feature alignment via cross-attention in decoder modules, following a regression head to estimate pointmaps $\\mathbf{X}^{1,1}, \\mathbf{X}^{2,1} \\in \\mathbb{R}^{H \\times W \\times 3}$ along with a confidence map $\\mathbf{C}^{1,1}, \\mathbf{C}^{2,1} \\in \\mathbb{R}^{H \\times W}$ for each image view. Generally, $\\mathbf{X}^{n,m}$ indicates the pointmap $\\mathbf{X}^n$ from camera $n$ expressed in camera $m$ 's coordinate frame, which is obtained by a rigid transformation:", + "bbox": [ + 511, + 573, + 906, + 800 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {X} ^ {n, m} = \\mathbf {P} _ {m} \\mathbf {P} _ {n} ^ {- 1} h \\left(\\mathbf {X} ^ {n}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 825, + 903, + 844 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{P}_m, \\mathbf{P}_n \\in \\mathbb{R}^{3 \\times 4}$ are world-to-camera poses for camera $m$ and camera $n$ , respectively, and $h(\\mathbf{X}^n)$ is a homogeneous mapping for the 3D coordinate in camera coordinate", + "bbox": [ + 511, + 854, + 906, + 902 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6a30322c438fc8178fdadc08700fa2c23601ca083bfd8c9601b0c19f2ade291e.jpg", + "image_caption": [ + "Figure 3. Overview of our training pipeline. (1) Stage I: build upon DUSt3R [44] architecture, we introduce a third regression point-matching head: $\\mathrm{Head}_3$ , which is in parallel to $\\mathrm{Head}_2$ for explicit pointmap matching in 3D space. For each pixel in the second view, the output pointmap coordinate is the 3D point map of the corresponding pixel in the first view. (2) Stage II: we introduce a temporal fusion module in three heads that enables multi-style sequential input for learning temporal motions." + ], + "image_footnote": [], + "bbox": [ + 96, + 88, + 535, + 238 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e6d6b6d0d562795bdee50bef700038574cb99c8c3950f3ff6313b68af3401c0f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 535, + 88, + 901, + 238 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "of camera $n$", + "bbox": [ + 89, + 316, + 178, + 330 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The task for Decoder 1 and its regression head estimate the 3D points for $\\mathbf{I}^1$ in its own coordinate system while Decoder 2 and its regression head are responsible for estimating pixel-wise 3D coordinates for $\\mathbf{I}^2$ in $\\mathbf{I}^1$ 's coordinate system after a rigid transformation of global rotation and translation. In the following contents, we will first introduce our POMATO with pairwise input images and then extend it to the video sequence input with our temporal motion module.", + "bbox": [ + 89, + 330, + 483, + 452 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Pointmap Matching with Pairwise Input", + "text_level": 1, + "bbox": [ + 89, + 459, + 436, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As discussed before, the definition of $\\mathbf{X}^{2,1}$ depicts a rigid camera transformation that is ambiguous to reflect explicit matching relationships for dynamic regions. 
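As a worked sketch of the rigid transformation in Eq. (1), the following few lines express the pointmap of camera $n$ in camera $m$'s coordinate frame; the helper names are ours, and the $3 \times 4$ world-to-camera poses are assumed to be given:

```python
# Sketch of Eq. (1): X^{n,m} = P_m P_n^{-1} h(X^n). Helper names are illustrative.
import torch

def to_hom(P: torch.Tensor) -> torch.Tensor:
    """Lift a (3, 4) world-to-camera pose to an invertible (4, 4) matrix."""
    return torch.cat([P, torch.tensor([[0.0, 0.0, 0.0, 1.0]])], dim=0)

def transform_pointmap(X_n: torch.Tensor, P_n: torch.Tensor, P_m: torch.Tensor) -> torch.Tensor:
    """X_n: (H, W, 3) pointmap of camera n -> X^{n,m} of shape (H, W, 3)."""
    h_X = torch.cat([X_n, torch.ones_like(X_n[..., :1])], dim=-1)  # h(X^n): homogeneous coords
    T = to_hom(P_m) @ torch.linalg.inv(to_hom(P_n))                # P_m P_n^{-1}
    return (h_X @ T.T)[..., :3]
```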
To tackle this, we propose to formulate an explicit pointmap matching $\\mathbf{X}_m^{2,1} \\in \\mathbb{R}^{H \\times W \\times 3}$ that maps dense RGB pixels of $\\mathbf{I}^2$ to 3D coordinates of corresponding pixels in $\\mathbf{I}^1$ under the first image's coordinate system. Given a 2D query pixel at $(x_2, y_2)$ in $\\mathbf{I}^2$ and its corresponding pixel at $(x_1, y_1)$ in $\\mathbf{I}^1$ , the matched pointmap at $(x_2, y_2)$ in $\\mathbf{I}^2$ is:", + "bbox": [ + 89, + 481, + 483, + 618 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {X} _ {m} ^ {2, 1} \\left(x _ {2}, y _ {2}\\right) = \\mathbf {X} ^ {1, 1} \\left(x _ {1}, y _ {1}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 186, + 622, + 480, + 640 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $(x,y)$ indicates the coordinates of 2D grid. For the representative dynamic point (red) in Fig. 2, the pointmap matching result is the 3D coordinate of point A in the coordinate system of the first image. As shown in Fig. 3, $\\mathbf{X}_m^{2,1}$ and $\\mathbf{X}^{1,1}$ are supposed to match perfectly in 3D space on the premise of neglecting occluded regions. We argue that the set of decoder tokens from the second branch preserves abundant matching information with iterative cross-attentions, so we introduce a matching head with the same architecture of $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$ . The supervision for pointmap matching $\\mathbf{X}_m^{2,1}$ still follows the 3D regression loss which is defined as the Euclidean distance:", + "bbox": [ + 89, + 645, + 483, + 825 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {m}} = \\left\\| \\frac {1}{z _ {m}} \\mathbf {X} _ {m} ^ {2, 1} - \\frac {1}{\\bar {z} _ {m}} \\bar {\\mathbf {X}} _ {m} ^ {2, 1} \\right\\|, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 837, + 480, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\bar{\\mathbf{X}}_m^{2,1}$ is the ground truth pointmap matching, which can be obtained following Eq. 2 on the 2D tracking dataset", + "bbox": [ + 89, + 869, + 483, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "with the depth and camera information. $z_{m},\\bar{z}_{m}$ are the same norm factor defined in DUSt3R. The matching confidence $\\mathbf{C}_m^{2,1}$ is also learned following the confidence loss for $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$ within valid regions:", + "bbox": [ + 511, + 316, + 906, + 378 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {m c o n f}} = \\mathbf {C} _ {m} ^ {2, 1} \\mathcal {L} _ {\\mathrm {m}} - \\alpha \\log \\mathbf {C} _ {m} ^ {2, 1} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 390, + 903, + 409 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The final loss $\\mathcal{L}$ of our POMATO for pairwise input is a combination of predefined DUSt3R loss $\\mathcal{L}_{\\mathrm{DUSt3R}}$ , matching loss $\\mathcal{L}_{\\mathrm{m}}$ , and matching confidence loss $\\mathcal{L}_{\\mathrm{mconf}}$ . When training our POMATO for pairwise input images at the first stage, the parameters in the encoder are frozen.", + "bbox": [ + 511, + 410, + 905, + 486 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. 
Dynamic Mask Estimation", + "text_level": 1, + "bbox": [ + 511, + 494, + 754, + 508 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Taking advantage of the explicit pointmap matching head, our POMATO can directly perform dynamic mask estimation without introducing an assistant module such as the optical flow model, getting rid of the additional computation cost and the potential domain gap. For an image pair $\\{\\mathbf{I}^i,\\mathbf{I}^j\\}$ along with the estimation of $\\mathbf{X}^{j,i}$ from $\\mathrm{Head}_2$ and $\\mathbf{X}_{m}^{j,i}$ from $\\mathrm{Head}_3$ , the dynamic mask $\\mathbf{D}^{j,i}$ can be obtained by comparing the difference between $\\mathbf{X}^{j,i}$ and $\\mathbf{X}_{m}^{j,i}$ :", + "bbox": [ + 511, + 516, + 906, + 638 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {D} ^ {j, i} = \\left| \\left| \\mathbf {X} _ {m} ^ {j, i} - \\mathbf {X} ^ {j, i} \\right| \\right| > \\alpha , \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 614, + 646, + 903, + 664 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\alpha$ is a dynamic threshold defined as $3 \\times$ median $(\\|\\mathbf{X}_m^{j,i} - \\mathbf{X}^{j,i}\\|)$ . The explicit dynamic mask can be incorporated into the global alignment process to minimize the interference of moving objects for pose estimation and 3D reconstruction. Details on the incorporation of dynamic masks for global alignment are provided in the supplementary materials.", + "bbox": [ + 511, + 672, + 906, + 779 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Temporal Motion Module", + "text_level": 1, + "bbox": [ + 511, + 787, + 748, + 803 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With the fundamental capability of geometric estimation and pointmap matching for pairwise images, we follow [6] and extend our POMATO to 4D video sequences by inserting a transformer-based motion module into the vanilla DPT head to construct the \"temporal DPT head\", which is illustrated in Fig.4. For a set of decoder tokens $\\mathbf{G} \\in \\mathbb{R}^{B,T,N,C}$", + "bbox": [ + 511, + 809, + 905, + 900 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e2fe70253e4295dea812bf5a75af67c019b523b5a34fe2c4ad961025ba265b7c.jpg", + "image_caption": [ + "Figure 4. Architecture of our temporal motion module. We insert a transformer-based motion module (in shallow yellow) into the vanilla DPT [33] head to enhance the temporal consistency." + ], + "image_footnote": [], + "bbox": [ + 130, + 87, + 441, + 282 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $B, T, N, C$ represent the batch size, window length of a video sequence, token number, and token dimension, respectively, we merge the token number dimension into the batch axis and apply the motion module which consists of two blocks of standard multi-head self-attention modules and feed-forward networks along the temporal dimension $T$ . To reduce the computation cost, the temporal motion modules are applied to features of low resolution.", + "bbox": [ + 89, + 349, + 483, + 472 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Downstream Temporal Tasks", + "text_level": 1, + "bbox": [ + 89, + 481, + 352, + 498 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given a video sequence of $T$ frames $\\mathbf{I}^{t_1},\\mathbf{I}^{t_2},\\ldots ,\\mathbf{I}^{t_T}$ , we construct a unique set of stereo image pairs for each task. As illustrated in Fig. 
5, the flexible construction of input pairs—combined with the proposed temporal motion module and pointmap matching head—enables POMATO to seamlessly address downstream temporal tasks, including 3D point tracking, video depth estimation, and 3D reconstruction. The keyframe selection strategy and input formulation for each task are detailed in the following section.", + "bbox": [ + 89, + 503, + 483, + 640 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Besides the default regression losses for $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$ , and predefined losses Eq. 3 and Eq. 4 for $\\mathrm{Head}_3$ , we further employ a temporal consistency loss, $\\mathcal{L}_{\\mathrm{t}}$ , which will be described in detail below.", + "bbox": [ + 89, + 641, + 483, + 699 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In addition to the default regression losses for $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$ , and the predefined losses in Eq. 3 and Eq. 4 for $\\mathrm{Head}_3$ , we further introduce a temporal consistency loss, $\\mathcal{L}_{\\mathrm{t}}$ , which will also be described in detail below.", + "bbox": [ + 89, + 700, + 482, + 761 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3D Point Tracking. As illustrated at the top of Fig.5, the keyframe is set to the first image of the global video sequence and fed to the proposed $\\mathrm{Head}_3$ to obtain the pointmap matching result of each query point (initialized at the first image) under the coordinate system of each reference frame $\\{\\mathbf{X}_{m}^{t_{1},t_{1}},\\mathbf{X}_{m}^{t_{1},t_{2}},\\mathbf{X}_{m}^{t_{1},t_{3}},\\dots \\mathbf{X}_{m}^{t_{1},t_{T}}\\}$ , while the set of reference frames $\\{\\mathbf{I}^{t_1},\\mathbf{I}^{t_2},\\mathbf{I}^{t_3},\\dots \\mathbf{I}^{t_T}\\}$ is fed to the $\\mathrm{Head}_1$ to obtain the pointmap under each ego coordinate system. The dense tracking results can be further sparsified by indexing the 2D coordinates. When inference on a video", + "bbox": [ + 89, + 761, + 483, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/8239ab20be4562800b2c68569d3d5ab2c42bca1f03dcdbe48b9eabdda64d41a0.jpg", + "image_caption": [ + "Figure 5. Inference pipelines for point tracking, video depth, and multi-view reconstruction. $t_k$ indicates the keyframe. With the help of the motion module and flexible input construction, PO-MATO can be easily applied to downstream temporal tasks." + ], + "image_footnote": [], + "bbox": [ + 519, + 90, + 906, + 406 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "longer than $T$ frames, a simple sliding-window approach with an overlap of four frames is adopted to enhance the consistency between adjacent video windows. 
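A small sketch of the sliding-window schedule just described, with window length $T$ and a four-frame overlap between consecutive windows; how the overlapping predictions are merged is not specified above, so that part is left out:

```python
# Sliding-window schedule for videos longer than T frames, with a 4-frame
# overlap between consecutive windows, as described above.
def sliding_windows(num_frames: int, T: int = 12, overlap: int = 4) -> list:
    """Return [start, end) frame-index ranges that cover the whole video."""
    stride = T - overlap
    windows, start = [], 0
    while True:
        end = min(start + T, num_frames)
        windows.append((start, end))
        if end == num_frames:
            return windows
        start += stride

# e.g. sliding_windows(28) -> [(0, 12), (8, 20), (16, 28)]
```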
The temporal consistency loss $\\mathcal{L}_{\\mathrm{t}}$ for tracking is:", + "bbox": [ + 511, + 484, + 906, + 542 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {t}} = \\frac {1}{T} \\sum_ {i = 1} ^ {T} \\left\\| \\frac {\\mathbf {X} _ {m} ^ {t _ {1} , t _ {i}}}{z _ {m} ^ {T}} - \\frac {\\bar {\\mathbf {X}} _ {m} ^ {t _ {1} , t _ {i}}}{\\bar {z} _ {m} ^ {T}} \\right\\| + \\left\\| \\frac {\\mathbf {X} ^ {t _ {i} , t _ {i}}}{z ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\\bar {z} ^ {T}} \\right\\|, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 527, + 551, + 905, + 589 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $z_{m}^{T} = \\mathrm{norm}\\left(\\mathbf{X}_{m}^{t_{1},t_{1}},\\mathbf{X}_{m}^{t_{1},t_{2}},\\dots,\\mathbf{X}_{m}^{t_{1},t_{T}}\\right)$ and $\\bar{z}_T =$ norm $(\\bar{\\mathbf{X}}_m^{t_1,t_1},\\bar{\\mathbf{X}}_m^{t_1,t_2},\\dots,\\bar{\\mathbf{X}}_m^{t_1,t_T})$ . $z_{m}^{T}$ and $\\bar{z}_T$ are defined analogously.", + "bbox": [ + 511, + 599, + 905, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Video Depth Estimation. As shown in the middle part of the Fig. 5, the input video sequence is formulated to a set of identical image pairs $\\{(\\mathbf{I}^{t_1},\\mathbf{I}^{t_1}),(\\mathbf{I}^{t_2},\\mathbf{I}^{t_2}),\\dots,(\\mathbf{I}^{t_T},\\mathbf{I}^{t_T})\\}$ and fed to $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$ , where the predictions from each head are identical: $\\{\\mathbf{X}^{t_1,t_1},\\mathbf{X}^{t_2,t_2},\\dots,\\mathbf{X}^{t_N,t_N}\\}$ . We use the output of $\\mathrm{Head}_1$ as our final video depth estimation. The temporal consistency loss $\\mathcal{L}_{\\mathrm{t}}$ is defined as:", + "bbox": [ + 511, + 643, + 905, + 742 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {t}} = \\frac {1}{T} \\sum_ {i = 1} ^ {T} \\left\\| \\frac {\\mathbf {X} _ {1} ^ {t _ {i} , t _ {i}}}{z _ {1} ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\\bar {z} ^ {T}} \\right\\| + \\left\\| \\frac {\\mathbf {X} _ {2} ^ {t _ {i} , t _ {i}}}{z _ {2} ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\\bar {z} ^ {T}} \\right\\|, (7)\n$$\n", + "text_format": "latex", + "bbox": [ + 529, + 752, + 906, + 789 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{X}_1^{t_i,t_i}$ and $\\mathbf{X}_2^{t_i,t_i}$ indicate the output from Head_1 and Head_2, respectively. $\\bar{\\mathbf{X}}^{t_i,t_i}$ is the pointmap groundtruth.", + "bbox": [ + 511, + 799, + 905, + 830 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3D Reconstruction. Assisted by the temporal motion module, redundant post-process operations such as global alignment can be omitted, allowing the reconstructed 3D point cloud to be obtained in a feed-forward manner. As shown in the bottom part of Fig.5, the keyframe is set to the last frame", + "bbox": [ + 511, + 830, + 905, + 900 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/d664dc6c171f8e6a159f2659a62cc7b36112bf928cff2e86cd189897267afe7a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
AlignmentMethodOptim.Onl.Sintel [4]BONN [30]KITTI [13]
Abs Rel ↓δ<1.25 ↑Abs Rel ↓δ<1.25 ↑Abs Rel ↓δ<1.25 ↑
Per-sequence scaleDUSt3R-GA [44]0.65645.20.15583.30.14481.3
MASt3R-GA [26]0.64143.90.25270.10.18374.5
MonST3R-GA [52]0.37855.80.06796.30.16874.4
Spann3R [41]0.62242.60.14481.30.19873.7
CUT3R [43]0.42147.90.07893.70.11888.1
POMATO0.41653.60.07496.10.08593.3
Per-sequence scale & shiftMonST3R-GA [52]0.33558.50.06396.40.10489.5
CUT3R [43]0.46656.20.11188.30.07594.3
POMATO0.3450.07296.50.08493.4
", + "bbox": [ + 132, + 88, + 859, + 275 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Video depth evaluation. We report scale-invariant depth and scale & shift invariant depth accuracy on Sintel [4], Bonn [30], and KITTI [13] datasets. Methods requiring global alignment are marked “GA”, while “Optim.” and “Onl.” indicate optimization-based and online methods, respectively. The best and second best results in each category are bold and underlined, respectively.", + "bbox": [ + 89, + 280, + 906, + 324 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$\\mathbf{I}^{t_T}$ within the temporal window of length $T$ and is fed to $\\mathrm{Head}_1$ with a set output of $\\{\\mathbf{X}^{t_T,t_T},\\mathbf{X}^{t_T,t_T},\\dots,\\mathbf{X}^{t_T,t_T}\\}$ . All the reference frames are input to the $\\mathrm{Head}_2$ so the target pointmaps $\\{\\mathbf{X}^{t_1,t_T},\\mathbf{X}^{t_2,t_T},\\dots,\\mathbf{X}^{t_T,t_T}\\}$ are aligned under the coordinate system of the keyframe. The temporal consistency loss $\\mathcal{L}_{\\mathrm{t}}$ is:", + "bbox": [ + 89, + 349, + 482, + 434 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {t}} = \\frac {1}{T} \\sum_ {i = 1} ^ {T} \\left\\| \\frac {\\mathbf {X} ^ {t _ {T} , t _ {T}}}{z _ {1} ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {T} , t _ {T}}}{\\bar {z} _ {1} ^ {T}} \\right\\| + \\left\\| \\frac {\\mathbf {X} ^ {t _ {i} , t _ {T}}}{z _ {2} ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {i} , t _ {T}}}{\\bar {z} _ {2} ^ {T}} \\right\\| \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 445, + 482, + 479 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We further freeze the parameters in Decoder1 and Decoder2 when training the temporal downstream tasks at the second stage. In our work, the temporal window length $T$ is set to 12. Additional explorations on the temporal length can be found in Sec.4.", + "bbox": [ + 89, + 489, + 483, + 566 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 89, + 580, + 223, + 598 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experimental Details", + "text_level": 1, + "bbox": [ + 89, + 606, + 290, + 622 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training data. We train our network with a mixture of five datasets: PointOdyssey [54], Tartanair [45], ParallelDomain4D [40], DynamicReplica [21] and Carla (0.9.15) [10]. The specific number and the usage ratio of each dataset can be found in the supplementary materials. All datasets include pixel-accurate ground truth depth, as well as camera intrinsics and extrinsics, and encompass a wide variety of dynamic scenes across both indoor and outdoor environments. Among them, PointOdyssey and DynamicReplica have additional 2D trajectory annotations for dynamic objects which can be used to construct pointmap matching ground truth following Eq. 2. All datasets are used to supervise geometry learning on $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$ , while only PointOdyssey, DynamicReplica, and TartanAir are used to train the proposed pointmap matching head.", + "bbox": [ + 89, + 628, + 482, + 854 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training and inference details. 
Our model architecture is based on the publicly available DUSt3R [52] model, utilizing the same backbone consisting of a ViT-Large encoder", + "bbox": [ + 89, + 854, + 483, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "and a ViT-Base decoder. To fully leverage MonST3R's geometry estimation capabilities in dynamic scenes, we initialize our model using the publicly available MonST3R checkpoint. For the newly introduced pointmap matching head, we initialize its weights from the pretrained $\\mathrm{Head}_2$ weights of MonST3R. The temporal motion module is initialized following [14]. We train our network for 10 epochs with a cosine learning rate schedule, with an initial learning rate of 1e-4. In the first stage, which involves pairwise training, we use a batch size of 16 on 4 A100 GPUs (40G). In the second stage, where the temporal motion module is introduced, the batch size is set to 4 with a fixed temporal window length of 12. During each training iteration, we randomly sample a downstream task—3D point tracking, video depth estimation, or 3D reconstruction—to construct the input pairs and apply the corresponding loss function.", + "bbox": [ + 511, + 349, + 906, + 592 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Video Depth Estimation", + "text_level": 1, + "bbox": [ + 511, + 604, + 733, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Following MonST3R [52] and CUT3R [43], we rescale all predictions from the same video to align them together by conducting two forms of alignment: per-sequence scale and shift alignment and per-sequence scale alignment. Thus, we can measure the per-frame depth quality and inter-frame depth consistency. We employ our proposed motion module for video depth estimation in a feed-forward manner as described in Sec.3.5 and compare our method against several variants of DUSt3R, including DUSt3R [44], MAST3R [26], MonST3R [52], Spann3R [41], and CUT3R [43]. Given 6 frames of $288 \\times 512$ on an NVIDIA 4070 GPU, POMATO reconstructs the 3D point cloud in 0.7 seconds, whereas global alignment-based methods such as MonST3R require 5.8 seconds. As shown in Tab. 1, our method demonstrates comparable performance to the global alignment (GA)-based MonST3R [52] on the Sintel [4] and BONN [30] datasets, while surpassing it on KITTI dataset. Besides, we", + "bbox": [ + 511, + 628, + 906, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/f5f28d1c790be8bf149f762f29cd56a6fb328dbe358471623ca58d55907612bc.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodPointOdyssey [54]ADT [31]PStudio [20]Average
L-12L-24L-12L-24L-12L-24L-12L-24
SpatialTracker* [47]20.4620.7121.6420.6730.4125.8724.1722.42
DUSt3R [44]19.0319.0329.0225.559.726.5019.2617.03
MASt3R [26]16.5817.3527.3626.4611.788.0918.5717.30
MonST3R [52]27.3127.9228.3026.1316.5011.0624.0321.70
POMATO33.2033.5831.5728.2224.5919.7929.7927.20
", + "bbox": [ + 205, + 93, + 787, + 202 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. 3D tracking evaluation. We report the APD metric to evaluate 3D point tracking on the PointOdyssey [54], ADT [31], and PStudio [20] datasets. L-12 and L-24 indicate tracking within the temporal length of 12 frames and 24 frames, respectively.", + "bbox": [ + 89, + 208, + 906, + 236 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ffa2a8e80de9dcbc736d37d3eb5d1989e36111cb092a54c8fa4c690e19d35a0a.jpg", + "image_caption": [ + "Figure 6. Qualitative comparison of dynamic scenes. Compared to MonST3R, our POMATO can provide more reliable motion masks, 3D point tracking, and reconstruction performance." + ], + "image_footnote": [], + "bbox": [ + 112, + 250, + 888, + 501 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "consistently outperform the state-of-the-art online method, CUT3R [43], across various settings. These results underscore the effectiveness of our approach, specifically (1) the joint learning of geometry and pointmap matching, and (2) the temporal motion module.", + "bbox": [ + 88, + 566, + 482, + 643 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. 3D Point Tracking", + "text_level": 1, + "bbox": [ + 89, + 651, + 269, + 667 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For 3D point tracking task, we use the Aria Digital Twin (ADT) [31], and Panoptic Studio (PStudio) [20] benchmarks from the TAPVid-3D [25] dataset along with the validation set on the PointOdyssey [54] dataset. We report the Average Percent Deviation (APD) metric, which quantifies the average percentage of points within a threshold relative to the ground truth depth. The APD metric serves as a direct measure of the accuracy of the predicted tracking. We reformulate the datasets and project all the query points within a temporal window to the first frame. We report tracking results on the length of 12 and 24 frames. As shown in Tab.2, our POMATO achieves the best performance on both PointOdyssey and ADT datasets. It's worth mentioning that SpatialTracker [47] is a state-of-the-art network tailored for 3D point tracking with ground truth camera intrinsic as ad", + "bbox": [ + 89, + 674, + 483, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ditional input data. POMATO surpasses it on two datasets and improves the average APD metric by $23.3\\%$ and $21.4\\%$ for 12 frames and 24 frames, respectively. For DUSt3R-based methods, we use the output of $\\mathrm{Head}_2$ as tracking results. Obviously, the ambiguous matching representation limits its capability to handle this fine-grained 3D reconstruction task in dynamic scenes.", + "bbox": [ + 511, + 566, + 906, + 675 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Camera Pose Estimation", + "text_level": 1, + "bbox": [ + 511, + 681, + 740, + 696 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Following DUSt3R-based methods, we perform global alignment with the model trained in the first stage on the Bonn [30] and TUM [37] datasets. The sampling stride is set to 5 for the Bonn dataset and 3 for the TUM dataset. Compared with optical-flow assisted global alignment in MonST3R, the dynamic mask is computed according to Eq. 5 while the 2D pseudo label is replaced by projecting the pointmap matching results to 2D coordinates with estimated camera intrinsic. 
Absolute Translation Error (ATE), Relative Translation Error (RPE trans), and Relative Rotation Error (RPE rot) are reported. The evaluation results over 40 frames are reported in Tab. 4. Notably, POMATO obtains an overall state-of-the-art performance and signifi", + "bbox": [ + 511, + 703, + 906, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/48af71f3300680b8034c43f62dca0f4717a244f56bdddfab423db451007292d7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Temporal LengthVideo DepthTracking (12 Frames)
Sintel [4]Bonn [30]KITTI [13]PointOdyssey [54]ADT [31]PStudio [20]
Abs Rel ↓δ<1.25 ↑Abs Rel ↓δ<1.25 ↑Abs Rel ↓δ<1.25 ↑APD ↑APD ↑APD ↑
Pair-wise0.54846.20.08794.00.11389.532.0629.8723.10
6 frames0.43651.30.07695.90.08593.532.6930.9324.52
12 frames0.41653.60.07596.10.08693.333.2031.5724.59
", + "bbox": [ + 174, + 88, + 823, + 186 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/d5e4b9b2b316a0891838b1d256865499a344168e46176581f1098b0fa64d8c42.jpg", + "table_caption": [ + "Table 3. Ablation study on the temporal motion module. The introduction of the temporal motion module brings a significant improvement. As the temporal window length enlarges from 6 frames to 12 frames, we obtain an overall consistent improvement." + ], + "table_footnote": [], + "table_body": "
MethodTUM [37]Bonn [30]
ATE ↓RPE trans ↓RPE rot ↓ATE ↓RPE trans ↓RPE rot ↓
DUSt3R [44]0.0250.0132.3610.0300.0252.522
MASt3R [26]0.0270.0151.9100.0310.0252.478
MonST3R [52]0.0210.0061.1420.0250.0212.120
CUT3R [43]0.0230.0160.5100.0280.0332.569
POMATO0.0200.0100.5090.0370.0161.782
", + "bbox": [ + 104, + 250, + 470, + 337 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/48fc00ed0ccff1357aa20e08011e8b828d049473515dabed56bb834c4997282a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 102, + 391, + 220, + 444 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/98a876741c25930c1c2f4bfce1d1c178379109f82b67d15153cad3df741cec13.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 102, + 445, + 218, + 497 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c01e277c9d6d6d62313b376a0e50c866e3c5aa3c837fecebdb575154bd8f9543.jpg", + "image_caption": [ + "Input Images", + "Figure 7. Effectiveness of our pointmap matching head. Without explicitly filtering out the motion area, both pose and geometry estimation will be degraded." + ], + "image_footnote": [], + "bbox": [ + 102, + 500, + 218, + 551 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/abf58e9ec06158973e58dbe4a60955efcc728a923015cf9b8a45e5acad73ffcb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 222, + 392, + 470, + 459 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a00bf8bcacaf47797f494012a320e2a40fdd47220aa6b3eb0c87cdcafa2a834a.jpg", + "image_caption": [ + "3D Reconstruction with our Pointmap Matching." + ], + "image_footnote": [], + "bbox": [ + 222, + 459, + 470, + 546 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/80719c660669e2d285012f6663ec105a6137bf97e42c4107de2e9c454bfec2f8.jpg", + "table_caption": [ + "Table 4. Pose estimation. Our method achieves an overall best performance and improves the RPE rot metric significantly." + ], + "table_footnote": [], + "table_body": "
MethodBonn [30]PointOdyssey [54]ADT [31]PStudio [20]
ATE ↓RPE trans ↓RPE rot ↓APD ↑APD ↑APD ↑
W/O Head30.0400.0151.72129.1029.6216.94
W/ Head30.0370.0161.78232.0629.8723.10
", + "bbox": [ + 104, + 637, + 468, + 691 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5. Ablation study on the effectiveness of the pointmap matching head. The comparisons are reported on the pose estimation and 3D point tracking tasks.", + "bbox": [ + 89, + 702, + 482, + 744 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "cantly improves the RPE-rot metric, surpassing MonST3R by $55.4\\%$ and $13.3\\%$ on the TUM and Bonn datasets.", + "bbox": [ + 89, + 773, + 482, + 804 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Ablation Study", + "text_level": 1, + "bbox": [ + 89, + 816, + 243, + 832 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conduct extensive ablation studies to evaluate the effectiveness of the temporal motion module and the proposed pointmap matching head. As shown in Table 3, we report results for three models: one trained with only pairwise", + "bbox": [ + 89, + 839, + 483, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "images (first-stage training), one using a shorter temporal window of 6 frames, and another using the default temporal window length of 12 frames. Incorporating temporal consistency yields substantial improvements across all datasets for video depth estimation and 3D point tracking. Further improvement is achieved when the temporal window length increases from 6 frames to 12 frames. In Table 5, we evaluate the effectiveness of the pointmap matching head. While it introduces only a modest improvement in the ATE metric, we attribute this to the limited motion and minimal viewpoint variation in the indoor evaluation dataset. As illustrated in Fig. 7, under challenging in-the-wild conditions with significant motion and rapid viewpoint changes, removing the pointmap matching head introduces ambiguity in explicit rigid transformation estimation, resulting in a clear degradation in performance. To further demonstrate the impact of the pointmap matching head on 3D point tracking, we conduct tracking experiments over 12 frames using the pairwise input setup. Clearly, removing the pointmap matching head (using only $\\mathrm{Head}_2$ ) leads to an inevitable performance drop, emphasizing explicit correspondence modeling for reliable long-term tracking.", + "bbox": [ + 511, + 252, + 906, + 585 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Discussion and Conclusion", + "text_level": 1, + "bbox": [ + 511, + 598, + 761, + 614 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduce POMATO, a unified framework for geometry estimation and motion understanding in dynamic scenes. By leveraging the proposed pointmap matching head, our method effectively distinguishes moving regions, thereby mitigating the interference introduced by dynamic objects. The temporal motion module further facilitates the learning of temporal dynamics across frames, enhancing scale consistency and improving performance in tasks where both geometry and matching are critical—most notably, 3D point tracking. The downstream temporal tasks including 3D point tracking, video depth estimation, and 3D reconstruction can be easily applied in a feed-forward manner. In future work, we plan to scale up training with more dynamic reconstruction and matching datasets to further enhance 3D reconstruction and tracking performance.", + "bbox": [ + 511, + 623, + 906, + 849 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. 
This work was supported by the National Natural Science Foundation of China (No. 62206244)", + "bbox": [ + 511, + 849, + 905, + 895 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 89, + 187, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Daniel Barath, Dmytro Mishkin, Luca Cavalli, Paul-Edouard Sarlin, Petr Hruby, and Marc Pollefeys. Affineglue: Joint matching and robust estimation. arXiv preprint arXiv:2307.15381, 2023. 3", + "[2] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, Varun Jampani, and Robin Rombach. Stable video diffusion: Scaling latent video diffusion models to large datasets. abs/2311.15127, 2023. 3", + "[3] Aleksei Bochkovskii, Amael Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R. Richter, and Vladlen Koltun. Depth pro: Sharp monocular metric depth in less than a second. arXiv, 2024. 3", + "[4] Daniel J. Butler, Jonas Wulff, Garrett B. Stanley, and Michael J. Black. A naturalistic open source movie for optical flow evaluation. In ECCV, pages 611-625, 2012. 6, 8", + "[5] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. CVPR, 2023. 2", + "[6] Sili Chen, Hengkai Guo, Shengnan Zhu, Feihu Zhang, Zi long Huang, Jiashi Feng, and Bingyi Kang. Video depth anything: Consistent depth estimation for super-long videos. arXiv preprint arXiv:2501.12375, 2025. 4", + "[7] Yu Chen, Yisong Chen, and Guoping Wang. Bundle adjustment revisited. arXiv preprint arXiv: 1912.03858, 2019. 3", + "[8] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 224-236, 2018. 3", + "[9] Carl Doersch, Yi Yang, Mel Vecerik, Dilara Gokay, Ankush Gupta, Yusuf Aytar, Joao Carreira, and Andrew Zisserman. Tapir: Tracking any point with per-frame initialization and temporal refinement. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10061-10072, 2023. 3", + "[10] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. Carla: An open urban driving simulator. In Conference on robot learning, pages 1-16. PMLR, 2017. 6, 2", + "[11] Xiao Fu, Wei Yin, Mu Hu, Kaixuan Wang, Yuexin Ma, Ping Tan, Shaojie Shen, Dahua Lin, and Xiaoxiao Long. Geowizard: Unleashing the diffusion priors for 3d geometry estimation from a single image. arXiv preprint arXiv: 2403.12013, 2024. 3", + "[12] Yasutaka Furukawa, Carlos Hernández, et al. Multi-view stereo: A tutorial. Foundations and Trends® in Computer Graphics and Vision, 9(1-2):1-148, 2015. 3", + "[13] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In CVPR, pages 3354-3361, 2012. 6, 8", + "[14] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff:Animate your personalized text-to-image diffusion models without specific tuning, 2023.6" + ], + "bbox": [ + 93, + 114, + 483, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Adam W. Harley, Zhaoyuan Fang, and Katerina Fragkiadaki. Particle video revisited: Tracking through occlusions using point trajectories. In ECCV, pages 59-75. Springer, 2022. 
3", + "[16] Mu Hu, Wei Yin, Chi Zhang, Zhipeng Cai, Xiaoxiao Long, Hao Chen, Kaixuan Wang, Gang Yu, Chunhua Shen, and Shaojie Shen. Metric3d v2: A versatile monocular geometric foundation model for zero-shot metric depth and surface normal estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2, 3", + "[17] Wenbo Hu, Xiangjun Gao, Xiaoyu Li, Sijie Zhao, Xiaodong Cun, Yong Zhang, Long Quan, and Ying Shan. Depthcrafter: Generating consistent long depth sequences for open-world videos. In CVPR, 2025. 3", + "[18] Mustafa Işık, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 3", + "[19] Muhammad Zubair Irshad, Mauro Comi, Yen-Chen Lin, Nick Heppert, Abhinav Valada, Rares Ambrus, Zsolt Kira, and Jonathan Tremblay. Neural fields in robotics: A survey. arXiv preprint arXiv: 2410.20220, 2024. 2", + "[20] Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Scott Godisart, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social interaction capture. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017. 7, 8", + "[21] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Dynamic stereo: Consistent dynamic depth from stereo videos. CVPR, 2023. 6, 2", + "[22] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Co-tracker: It is better to track together. In Proc. ECCV, 2024. 3", + "[23] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2, 3", + "[24] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM TOG, 42(4):139-1, 2023. 3", + "[25] Skanda Koppula, Ignacio Rocco, Yi Yang, Joe Heyward, João Carreira, Andrew Zisserman, Gabriel Brostow, and Carl Doersch. Tapvid-3d: A benchmark for tracking any point in 3d. arXiv preprint arXiv: 2407.05921, 2024. 7", + "[26] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. European Conference on Computer Vision, 2024. 2, 6, 7, 8", + "[27] Philipp Lindenberger, Paul-Edouard Sarlin, and Marc Pollefeys. Lightglue: Local feature matching at light speed. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17627-17638, 2023. 3", + "[28] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf:" + ], + "bbox": [ + 516, + 92, + 903, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 3", + "[29] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, and Marc Szafraniec et al. DINOv2: Learning robust visual features without supervision. Trans. Mach. Learn. Research, 2024. 3", + "[30] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguere, and Cyril Stachniss. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. 
In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 7855-7862. IEEE, 2019. 6, 7, 8", + "[31] Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Carl Yuheng Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. arXiv preprint arXiv: 2306.06362, 2023. 7, 8", + "[32] Rene Ranftl, Vibhav Vineet, Qifeng Chen, and Vladlen Koltun. Dense monocular depth estimation in complex dynamic scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4058-4066, 2016. 3", + "[33] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF international conference on computer vision, pages 12179-12188, 2021. 5", + "[34] Chris Russell, Rui Yu, and Lourdes Agapito. Video popuup: Monocular 3d reconstruction of dynamic scenes. In European conference on computer vision, pages 583-598. Springer, 2014. 3", + "[35] Peter Sand and Seth Teller. Particle video: Long-range motion estimation using point trajectories. International journal of computer vision, 80:72-91, 2008. 3", + "[36] Jiahao Shao, Yuanbo Yang, Hongyu Zhou, Youmin Zhang, Yujun Shen, Matteo Poggi, and Yiyi Liao. Learning temporally consistent video depth from video diffusion priors. abs/2406.01493, 2024. 3", + "[37] Jürgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. A benchmark for the evaluation of RGB-D SLAM systems. pages 573-580, 2012. 7, 8", + "[38] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part II 16, pages 402–419. Springer, 2020. 3", + "[39] Zachary Teed and Jia Deng. Droid-slam: Deep visual slam for monocular, stereo, and rgb-d cameras. Neural Information Processing Systems, 2021. 2", + "[40] Basile Van Hoorick, Rundi Wu, Ege Ozguroglu, Kyle Sargent, Ruoshi Liu, Pavel Tokmakov, Achal Dave, Changxi Zheng, and Carl Vondrick. Generative camera dolly: Extreme monocular dynamic novel view synthesis. arXiv preprint arXiv:2405.14868, 2024. 6, 2", + "[41] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024. 2, 6" + ], + "bbox": [ + 91, + 92, + 482, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[42] Qianqian Wang, Vickie Ye, Hang Gao, Jake Austin, Zhengqi Li, and Angjoo Kanazawa. Shape of motion: 4d reconstruction from a single video. arXiv preprint arXiv:2407.13764, 2024. 2, 3", + "[43] Qianqian Wang, Yifei Zhang, Aleksander Holynski, Alexei A. Efros, and Angjoo Kanazawa. Continuous 3d perception model with persistent state, 2025. 2, 6, 7, 8", + "[44] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. DUSt3R: Geometric 3D vision made easy. In CVPR, pages 20697-20709, 2024. 2, 3, 4, 6, 7, 8", + "[45] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian Scherer. TartanAir: A dataset to push the limits of visual SLAM. pages 4909-4916, 2020. 6, 2", + "[46] Yihan Wang, Lahav Lipson, and Jia Deng. SEA-RAFT: Simple, efficient, accurate RAFT for optical flow. In ECCV, 2024. 1", + "[47] Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. Spatialtracker: Tracking any 2d pixels in 3d space. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2, 3, 7", + "[48] Guangkai Xu, Yongtao Ge, Mingyu Liu, Chengxiang Fan, Kangyang Xie, Zhiyue Zhao, Hao Chen, and Chunhua Shen. Diffusion models trained with large data are transferable visual models. arXiv preprint arXiv: 2403.06090, 2024. 3", + "[49] Yueming Xu, Haochen Jiang, Zhongyang Xiao, Jianfeng Feng, and Li Zhang. Dg-slam: Robust dynamic gaussian splatting slam with hybrid pose optimization. arXiv preprint arXiv: 2411.08373, 2024. 2", + "[50] Honghui Yang, Di Huang, Wei Yin, Chunhua Shen, Haifeng Liu, Xiaofei He, Binbin Lin, Wanli Ouyang, and Tong He. Depth any video with scalable synthetic data. arXiv preprint arXiv:2410.10815, 2024. 3", + "[51] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. arXiv:2406.09414, 2024. 2, 3", + "[52] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. Monst3r: A simple approach for estimating geometry in the presence of motion. arXiv preprint arxiv:2410.03825, 2024. 2, 3, 6, 7, 8, 1", + "[53] Guosheng Zhao, Chaojun Ni, Xiaofeng Wang, Zheng Zhu, Xueyang Zhang, Yida Wang, Guan Huang, Xinze Chen, Boyuan Wang, Youyi Zhang, Wenjun Mei, and Xingang Wang. Drivedreamer4d: World models are effective data machines for 4d driving scene representation. 2024. 2", + "[54] Yang Zheng, Adam W. Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J. Guibas. PointOdyssey: A large-scale synthetic dataset for long-term point tracking. In ICCV, 2023. 6, 7, 8, 2" + ], + "bbox": [ + 516, + 92, + 903, + 825 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction", + "text_level": 1, + "bbox": [ + 166, + 85, + 831, + 130 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Supplementary Material", + "bbox": [ + 380, + 141, + 614, + 162 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A. Pointmap Matching for Global Alignment.", + "text_level": 1, + "bbox": [ + 89, + 178, + 475, + 195 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Given a sequence of video frames, the target of global alignment is to project all pairwise estimated pointmaps to the same global world coordinates. DUSt3R constructs a connectivity pairwise graph and aims to minimize the reprojection error for each image pair globally where the dynamic regions are supposed to be separated from the static regions. To this end, MonST3R [52] further introduces an assistant optical flow network [46] to help mask the dynamic regions and provide a pseudo label of 2D matching for minimizing the re-projection error in static regions. However, the introduced assistant model will introduce inevitable domain gaps and additional computation costs. Besides, the optical flow model is tailored for matching within two adjacent frames, suffering an obvious degeneration with the large view displacement. 
In POMATO, for an image pair $\\{\\mathbf{I}^i,\\mathbf{I}^j\\}$ , the dynamic mask $\\mathbf{D}^{j,i}$ is calculated by comparing the difference between $\\mathbf{X}^{j,i}$ and $\\mathbf{X}_m^{j,i}$ :", + "bbox": [ + 89, + 205, + 483, + 465 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {D} ^ {j, i} = \\left| \\left| \\mathbf {X} _ {m} ^ {j, i} - \\mathbf {X} ^ {j, i} \\right| \\right| > \\alpha , \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 479, + 482, + 497 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $\\alpha$ is a dynamic threshold defined as $3 \\times$ median $(\\|\\mathbf{X}_m^{j,i} - \\mathbf{X}^{j,i}\\|)$ .", + "bbox": [ + 89, + 513, + 482, + 544 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Given the updated camera intrinsic $\\tilde{K}$ after an iteration of optimization, the target matching 2D coordinates $\\mathbf{F}_m^{j,i} \\in \\mathbb{R}^{H \\times W \\times 2}$ can be calculated as $\\mathbf{F}_m^{j,i} = p(\\tilde{\\mathbf{K}}\\mathbf{X}_m^{j,i})$ where $p$ is a mapping from 3D camera coordinates to 2D pixel coordinates. The optical flow loss proposed in MonST3R can thus be modified with our dynamic mask and 2D matching coordinates. Details about the optical flow loss are referred to MonST3R [52].", + "bbox": [ + 89, + 546, + 482, + 667 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "B. Fast 3D Reconstruction with video PO-MATO", + "text_level": 1, + "bbox": [ + 89, + 688, + 482, + 722 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Given a sequence of images less than the temporal window length of 12 frames, dynamic 3D reconstruction can be obtained by directly estimating the pointmaps of all reference images to the coordinate of the key frame as discussed in the Sec.3.4. Here, we provide more visualization results of this feed-forward manner and demonstrate the effectiveness of introducing the temporal motion module. As shown in Fig.8, directly applying the pairwise reconstruction will suffer from an obvious scale shift among different frames. After the temporal motion module, the consistency within the video sequence obtains an obvious enhancement.", + "bbox": [ + 89, + 734, + 482, + 900 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "C. Training Data Details", + "text_level": 1, + "bbox": [ + 513, + 178, + 723, + 195 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The details about the training datasets can be found in Tab.6. The finetuning procedure of POMATO was conducted exclusively using synthetic training datasets.", + "bbox": [ + 511, + 203, + 903, + 250 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "D. More Visualizations on Dynamic Scenes", + "text_level": 1, + "bbox": [ + 511, + 262, + 875, + 280 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We provide more visualizations in Fig. 9 and Fig. 10. MonST3R suffers obvious degeneration when the view displacement is large as reflected by the erroneous pose estimation while POMATO can still provide a consistent camera trajectory.", + "bbox": [ + 511, + 287, + 905, + 364 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/5ae28c25f828dac5cf5304f158000a3d4b15aaa5c22767f9cce24e599bcd63ea.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetDomainScene Type# of Frames# of ScenesDynamicsRatio
PointOdyssey [54]SyntheticIndoors & Outdoors200k131Realistic57.1%
TartanAir [45]SyntheticIndoors & Outdoors100k163None14.3%
DynamicReplica [21]SyntheticIndoors145k524Realistic14.3%
ParallelDomain4D [40]SyntheticOutdoors750k15015Driving8.6%
Carla [10]SyntheticOutdoors7k5Driving5.7%
", + "bbox": [ + 156, + 170, + 841, + 276 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 6. An overview of all training datasets and sample ratio. All datasets provide both camera pose, depth, and most of them include dynamic objects.", + "bbox": [ + 89, + 287, + 906, + 316 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/133a5be75fe9852f50d9dc75a3e28f6a3f8ed363159786f123db3e08abb2c86a.jpg", + "image_caption": [ + "Figure 8. Fast 3D reconstruction with our temporal motion module. Given a sequence of images less than temporal window length, our POMATO can directly obtain a global pointmap under the key frame coordinate." + ], + "image_footnote": [], + "bbox": [ + 96, + 484, + 906, + 787 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/33c6bad035555d5e4e9fdaf94cf6b764db4cf1f575afef326383c07bccb07da0.jpg", + "image_caption": [ + "Figure 9. Compared with MonST3R, our POMATO can provide more complete dynamic masks and consistent geometry." + ], + "image_footnote": [], + "bbox": [ + 109, + 141, + 890, + 824 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1553dd9fea27bacf18cff9e88695799cf7d92dd8244a549641fdbd79c3b38df9.jpg", + "image_caption": [ + "Figure 10. MonST3R suffers obvious degeneration when the view displacement is large as reflected by the erroneous pose estimation while POMATO can still provide a consistent camera trajectory." + ], + "image_footnote": [], + "bbox": [ + 101, + 133, + 890, + 813 + ], + "page_idx": 13 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_model.json b/data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..7ca7e79baab2c6d019653c49c59b9471ddde9c76 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_model.json @@ -0,0 +1,2164 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.275, + 0.061, + 0.723 + ], + "angle": 270, + "content": "arXiv:2504.05692v2 [eess.IV] 8 Aug 2025" + }, + { + "type": "title", + "bbox": [ + 0.167, + 0.13, + 0.833, + 0.175 + ], + "angle": 0, + "content": "POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.202, + 0.777, + 0.24 + ], + "angle": 0, + "content": "Songyan Zhang\\(^{1*}\\) Yongtao Ge\\(^{2,3*}\\) Jinyuan Tian\\(^{2*}\\) Guangkai Xu\\(^{2}\\) Hao Chen\\(^{2\\boxtimes}\\) Chen Lv\\(^{1}\\) Chunhua Shen\\(^{2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.246, + 0.813, + 0.283 + ], + "angle": 0, + "content": "1Nanyang Technological University, Singapore 2Zhejiang University, China 3The University of Adelaide, Australia" + }, + { + "type": "image", + "bbox": [ + 0.096, + 0.321, + 0.2, + 0.58 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.207, + 0.325, + 0.611, + 0.58 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.616, + 0.326, + 0.895, + 0.576 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.594, + 0.908, + 0.622 + ], + "angle": 0, + "content": "Figure 1. 3D reconstruction from an arbitrary dynamic video with POMATO. Without relying on external modules, POMATO can directly perform 3D reconstruction along with temporal 3D point tracking and dynamic mask estimation." 
+ }, + { + "type": "title", + "bbox": [ + 0.248, + 0.635, + 0.327, + 0.65 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.667, + 0.483, + 0.877 + ], + "angle": 0, + "content": "Recent approaches to 3D reconstruction in dynamic scenes primarily rely on the integration of separate geometry estimation and matching modules, where the latter plays a critical role in distinguishing dynamic regions and mitigating the interference caused by moving objects. Furthermore, the matching module explicitly models object motion, enabling the tracking of specific targets and advancing motion understanding in complex scenarios. Recently, the proposed representation of pointmap in DUSt3R suggests a potential solution to unify both geometry estimation and matching in 3D space, effectively reducing computational overhead by eliminating the need for redundant auxiliary modules. However, it still struggles with ambiguous correspondences in dynamic regions, which limits reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.637, + 0.908, + 0.877 + ], + "angle": 0, + "content": "performance in such scenarios. In this work, we present POMATO, a unified framework for dynamic 3D reconstruction by marrying POintmap MAtching with Temporal mOtion. Specifically, our method first learns an explicit matching relationship by mapping RGB pixels across different views to 3D pointmaps within a unified coordinate system. Furthermore, we introduce a temporal motion module for dynamic motions that ensures scale consistency across different frames and enhances performance in 3D reconstruction tasks requiring both precise geometry and reliable matching, most notably 3D point tracking. We show the effectiveness of our proposed POMATO by demonstrating the remarkable performance across multiple downstream tasks, including video depth estimation, 3D point tracking, and pose estimation. Code and models are publicly available at https://github.com/wyddmw/POMATO." + }, + { + "type": "page_footnote", + "bbox": [ + 0.115, + 0.887, + 0.363, + 0.9 + ], + "angle": 0, + "content": "* Equal contribution. \\( \\boxdot \\) Corresponding author." + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.1, + 0.12, + 0.16, + 0.136 + ], + "angle": 0, + "content": "Image1" + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.094, + 0.393, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.09, + 0.761, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.763, + 0.094, + 0.899, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.25, + 0.908, + 0.308 + ], + "angle": 0, + "content": "Figure 2. Ambiguity in 3D point matching in dynamic scenes with DUSt3R. Given representative corresponding pixels of background (orange) and moving foreground (red) in two different views, DUSt3R outputs a pair of 3D points within the same coordinate system. In static regions, identical pixels share the same 3D coordinates which provide an accurate matching relationship in 3D space, but in moving regions, the 3D coordinates are inconsistent for corresponding pixels across views, leading to ambiguous 3D matching relationships." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.317, + 0.222, + 0.332 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.348, + 0.485, + 0.71 + ], + "angle": 0, + "content": "Image-based 3D reconstruction is a fundamental task in computer vision with a wide range of applications including SLAM [39], robotics [19, 49], autonomous driving [53], and novel view synthesis [5]. While substantial progress has been achieved in static 3D reconstruction [16, 23, 26, 44, 51], dynamic scenes remain a major hurdle due to complexities like non-rigid motion and deformation, which may hamper the learning of local structure and camera motion, thereby complicating accurate 3D reconstruction for dynamic scenes. These scenarios require explicit modeling of both scene geometry and object motion. Moreover, downstream reconstruction tasks, such as 3D point tracking, demand precise geometry estimation and robust matching across views. To effectively distinguish dynamic regions, it is essential to establish reliable correspondences between different frames. Some pioneering works have attempted to address dynamic motion by incorporating additional auxiliary matching modules, such as optical flow [42, 52] or 2D tracking [47]. However, these approaches may suffer from domain gaps and accumulated errors between modules, limiting their effectiveness. A unified framework that seamlessly integrates geometry estimation and matching for dynamic 3D reconstruction remains a critical and underexplored challenge." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.715, + 0.483, + 0.866 + ], + "angle": 0, + "content": "Recently, DUSt3R [44] proposes a promising solution to address this challenge. It introduces the concept of a pointmap that assigns each pixel in an image to a corresponding 3D coordinate. The network utilizes a standard transformer-based encoder-decoder architecture and receives a pair of images as input. The system incorporates two parallel decoders to predict pointmaps for each view within the same coordinate system. However, this representation is limited to static matching and struggles in dynamic scenes, as illustrated in Fig. 2." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.871, + 0.483, + 0.902 + ], + "angle": 0, + "content": "To address this problem, we present POMATO, a unified network for dynamic 3D reconstruction by marrying" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.318, + 0.908, + 0.771 + ], + "angle": 0, + "content": "POintmap MMatching with Temporal mOtion. We argue that with iterative cross-attention modules across different views, matching features are well preserved in the decoder tokens. We thus introduce an auxiliary pointmap matching head to learn explicit correspondences. Specifically, for each pixel in the second view, the pointmap matching head predicts the corresponding 3D coordinates of its counterpart in the first view, under the shared coordinate system. Our proposed pointmap-based matching representation enables the establishment of explicit correspondences in 3D space, which can be directly leveraged for motion analysis, especially the estimation of dynamic regions. Moreover, we further extend our POMATO to handle 4D video sequences by introducing a temporal motion module that enhances the learning of temporal motions. This motion module promotes scale consistency across different frames and improves performance in tasks where both accurate geometry and reliable matching are paramount, most notably 3D point tracking. 
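To make the role of the matching head concrete, the following minimal NumPy sketch shows how a matched pointmap can be assembled from the first view's pointmap once pixel correspondences are known. The array names and the integer correspondence map are illustrative assumptions, not part of the released code.

```python
import numpy as np

def matched_pointmap(X11: np.ndarray, corr_xy: np.ndarray) -> np.ndarray:
    """For each pixel (x2, y2) in view 2, look up the 3D point of its
    corresponding pixel (x1, y1) in view 1's pointmap X11 of shape (H, W, 3).

    corr_xy[y2, x2] = (x1, y1) is a hypothetical integer correspondence map,
    e.g. derived from ground-truth 2D tracks.
    """
    x1 = corr_xy[..., 0]
    y1 = corr_xy[..., 1]
    return X11[y1, x1]  # (H, W, 3): the matched pointmap in view 1's frame
```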
Compared with recent temporal 3D reconstruction methods [41, 43] based on an autoregression manner where the previous frames are blocked from the recently added frames, our temporal motion module is based on the self-attention mechanism along the temporal dimension, facilitating a comprehensive interaction across all frames. Our POMATO is trained in a two-stage manner. In the first stage, we used pairwise input images to learn fundamental geometry and matching capabilities. In the second stage, we extend the input to sequential video frames and incorporate the temporal motion module, enabling the model to effectively capture motions over time." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.909, + 0.902 + ], + "angle": 0, + "content": "Our contributions can be summarized in threefold: First, we propose a novel approach that unifies the fundamental geometry estimation and motion understanding for dynamic 3D reconstruction into a single network by incorporating the representation of pointmap matching. Second, we introduce a temporal motion module to facilitate the interactions of motion features along the temporal dimension, which significantly improves the performance in tasks" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.168 + ], + "angle": 0, + "content": "where both accurate geometry and precise matching are required for video sequential input, most notably 3D point tracking. Third, we demonstrate promising performance on 3D vision tasks, including video depth estimation, 3D point tracking, and camera pose estimation." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.181, + 0.233, + 0.197 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.206, + 0.485, + 0.904 + ], + "angle": 0, + "content": "Geometry estimation refers to the process of determining the spatial properties and structures from different forms of visual data. Direct recovery of 3D geometry from a single RGB image is by nature an ill-posed problem. Many recent works [3, 16, 23, 51] have tried to leverage strong pre-trained models to learn generalizable depthmaps from large-scale real and synthetic datasets to solve ambiguities. For example, Marigold [23], Geowizard [11], and GenPercept [48] aim at leveraging the generative priors from pre-trained diffusion models by finetuning them on synthetic datasets. Depthanything V2 [51] proposes to estimate scale-and-shift invariant disparity map by finetuning DINOV2 [29] model on synthetic datasets and largescale pseudo labels. Depth Pro [3] further propose a FOV head to estimate the metric depthmap from a single image without relying on camera intrinsics as input. Due to the scale ambiguity in the monocular depth estimation models, ChronoDepth [36], DepthCrafter [17], and Depth-any-video [50] proposes to learn temporal consistent depthmaps by leveraging the priors from a video generative model, i.e. SVD [2]. In another line of the research, multi-view stereo reconstruction (MVS) methods seek to reconstruct visible surfaces from multiple viewpoints. Traditional MVS [12] and SfM pipelines break the reconstruction pipeline into several sub-problems, e.g., feature extraction [8], image matching [1, 27], triangulation, and bundle adjustment [7]. The chain is complicated and accumulates noise for every single step, thus often resulting in unsatisfactory performance in complex real-world scenes. 
Recognizing the limitations of previous MVS methods, seminal work DUSt3R [44] proposes 3D pointmaps representation, and trains a network from large-scale data to regress the dense and accurate pointmaps from a pair of images. The camera intrinsics and relative camera poses can be implicitly inferred from the two-view pointmaps. However, it still can not handle reconstruction for dynamic scenes. MonST3R [52] directly finetuned the original DUSt3R model upon synthetic datasets that contain dynamic scenes. Motion representation. Optical flow is a commonly used representation for 2D motion. RAFT [38] is a representative work for pairwise optical flow estimation, which employs a 4D cost volume and recurrently estimates the optical flow. Some follow-up methods further extend it to multi-frame (3-5 frames) settings, which is still insufficient for long-range tracking. To resolve the problem, Particle Video [35] represent video motion by using a set of particles. Each" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.287 + ], + "angle": 0, + "content": "particle is an image point sample with a long-duration trajectory and other properties. Particle videos have two key advantages over optical flow: (1) persistence through occlusions, and (2) multi-frame temporal context. Some recent works, PIPs [15], TAPIR [9] and Cotracker [22] have renewed interest in this representation and show promising long-term 2D point tracking results. Recognizing the advantage of point representation, SpatialTracker [47] lifts the 2D points into 3D and performs tracking in the 3D space. Though it can handle occlusions and enhance 3D tracking accuracy, it still relies on a separate monocular depth estimator, which prevents it performing 3D point tracking in an end-to-end fashion." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.288, + 0.908, + 0.515 + ], + "angle": 0, + "content": "Multi-view dynamic reconstruction. Our work is closely connected to multi-view dynamic 3D reconstruction techniques. Early works [32, 34] take the straightforward idea that first pre-segment the scene into different regions, each corresponding to a single rigid part of an object, then apply the rigid-SfM technique to each of the regions. Some of the recent Neural Radiance Fields (NeRF) [28] and Gaussian Splatting [24] based methods have achieved state-of-the-art results. However, most of these methods require simultaneous multi-view video inputs or require predefined templates [18]. Shape of motion [42], proposes a new dynamic scene representation to represent the dynamic scene as a set of persistent 3D Gaussians, and optimize the representation from a monocular video by leveraging monocular depth estimation priors and 2D track estimates across frames." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.527, + 0.606, + 0.543 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.552, + 0.644, + 0.568 + ], + "angle": 0, + "content": "3.1. Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.574, + 0.907, + 0.801 + ], + "angle": 0, + "content": "The overview of our POMATO is demonstrated in Fig.3. We adopt the definition of pointmap \\(\\mathbf{X} \\in \\mathbb{R}^{H \\times W \\times 3}\\) in DUSt3R [44] as a dense 2D field of 3D points where each point corresponds to its respective RGB pixel. 
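For intuition, a pointmap of this form can be materialized from a depth map and camera intrinsics. The sketch below is a standard pinhole unprojection, included only to illustrate the representation; the network itself regresses pointmaps directly rather than computing them this way.

```python
import numpy as np

def depth_to_pointmap(depth: np.ndarray, K: np.ndarray) -> np.ndarray:
    """Build a pointmap X of shape (H, W, 3) from a depth map (H, W) and
    intrinsics K (3, 3): each pixel maps to a 3D point in the camera frame."""
    H, W = depth.shape
    xs, ys = np.meshgrid(np.arange(W), np.arange(H))
    pix = np.stack([xs, ys, np.ones_like(xs)], axis=-1).astype(np.float64)
    rays = pix @ np.linalg.inv(K).T   # K^{-1} [x, y, 1]^T per pixel
    return rays * depth[..., None]    # scale each ray by its depth
```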
Given a pair of input images \(\mathbf{I}^1, \mathbf{I}^2 \in \mathbb{R}^{H \times W \times 3}\) from two different views, a weight-sharing ViT first extracts the corresponding features \(\mathbf{F}^1, \mathbf{F}^2\) for each view. Two parallel branches are employed to decode the geometric structures and enhance the feature alignment via cross-attention in the decoder modules, followed by a regression head that estimates pointmaps \(\mathbf{X}^{1,1}, \mathbf{X}^{2,1} \in \mathbb{R}^{H \times W \times 3}\) along with a confidence map \(\mathbf{C}^{1,1}, \mathbf{C}^{2,1} \in \mathbb{R}^{H \times W}\) for each image view. Generally, \(\mathbf{X}^{n,m}\) indicates the pointmap \(\mathbf{X}^n\) from camera \(n\) expressed in camera \(m\)'s coordinate frame, which is obtained by a rigid transformation:" }, { "type": "equation", "bbox": [ 0.624, 0.827, 0.905, 0.845 ], "angle": 0, "content": "\[
\mathbf {X} ^ {n, m} = \mathbf {P} _ {m} \mathbf {P} _ {n} ^ {- 1} h \left(\mathbf {X} ^ {n}\right), \tag {1}
\]" }, { "type": "text", "bbox": [ 0.512, 0.855, 0.908, 0.903 ], "angle": 0, "content": "where \(\mathbf{P}_m, \mathbf{P}_n \in \mathbb{R}^{3 \times 4}\) are world-to-camera poses for camera \(m\) and camera \(n\), respectively, and \(h(\mathbf{X}^n)\) is a homogeneous mapping for the 3D coordinates in the camera coordinate system of camera \(n\)." } ], [ { "type": "image", "bbox": [ 0.098, 0.089, 0.536, 0.239 ], "angle": 0, "content": null }, { "type": "image", "bbox": [ 0.536, 0.089, 0.902, 0.239 ], "angle": 0, "content": null }, { "type": "image_caption", "bbox": [ 0.089, 0.249, 0.907, 0.306 ], "angle": 0, "content": "Figure 3. Overview of our training pipeline. (1) Stage I: built upon the DUSt3R [44] architecture, we introduce a third regression point-matching head, \(\mathrm{Head}_3\), which runs in parallel to \(\mathrm{Head}_2\) for explicit pointmap matching in 3D space. For each pixel in the second view, the output pointmap coordinate is the 3D point of the corresponding pixel in the first view. (2) Stage II: we introduce a temporal fusion module in the three heads that enables multi-style sequential input for learning temporal motions." }, { "type": "text", "bbox": [ 0.09, 0.332, 0.484, 0.453 ], "angle": 0, "content": "Decoder 1 and its regression head estimate the 3D points for \(\mathbf{I}^1\) in its own coordinate system, while Decoder 2 and its regression head are responsible for estimating pixel-wise 3D coordinates for \(\mathbf{I}^2\) in \(\mathbf{I}^1\)'s coordinate system after a rigid transformation of global rotation and translation. In the following, we first introduce our POMATO with pairwise input images and then extend it to video sequence input with our temporal motion module." }, { "type": "title", "bbox": [ 0.091, 0.46, 0.437, 0.476 ], "angle": 0, "content": "3.2. Pointmap Matching with Pairwise Input" }, { "type": "text", "bbox": [ 0.09, 0.482, 0.484, 0.619 ], "angle": 0, "content": "As discussed before, the definition of \(\mathbf{X}^{2,1}\) depicts a rigid camera transformation that is too ambiguous to reflect explicit matching relationships for dynamic regions. 
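For concreteness, here is a minimal NumPy sketch of the rigid re-expression in Eq. (1). We assume 4x4 homogeneous world-to-camera poses so that the inverse is well defined (the paper writes 3x4 poses).

```python
import numpy as np

def reexpress_pointmap(Xn: np.ndarray, Pn: np.ndarray, Pm: np.ndarray) -> np.ndarray:
    """Eq. (1): express pointmap X^n (H, W, 3), given in camera n's frame,
    in camera m's frame. Pn and Pm are 4x4 world-to-camera poses."""
    H, W, _ = Xn.shape
    h = np.concatenate([Xn.reshape(-1, 3), np.ones((H * W, 1))], axis=1)  # h(X^n)
    Xnm = (Pm @ np.linalg.inv(Pn) @ h.T).T[:, :3]  # rigid transform, drop homogeneous 1
    return Xnm.reshape(H, W, 3)
```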
To tackle this, we propose to formulate an explicit pointmap matching \\(\\mathbf{X}_m^{2,1} \\in \\mathbb{R}^{H \\times W \\times 3}\\) that maps dense RGB pixels of \\(\\mathbf{I}^2\\) to 3D coordinates of corresponding pixels in \\(\\mathbf{I}^1\\) under the first image's coordinate system. Given a 2D query pixel at \\((x_2, y_2)\\) in \\(\\mathbf{I}^2\\) and its corresponding pixel at \\((x_1, y_1)\\) in \\(\\mathbf{I}^1\\), the matched pointmap at \\((x_2, y_2)\\) in \\(\\mathbf{I}^2\\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.187, + 0.623, + 0.482, + 0.641 + ], + "angle": 0, + "content": "\\[\n\\mathbf {X} _ {m} ^ {2, 1} \\left(x _ {2}, y _ {2}\\right) = \\mathbf {X} ^ {1, 1} \\left(x _ {1}, y _ {1}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.646, + 0.484, + 0.827 + ], + "angle": 0, + "content": "where \\((x,y)\\) indicates the coordinates of 2D grid. For the representative dynamic point (red) in Fig. 2, the pointmap matching result is the 3D coordinate of point A in the coordinate system of the first image. As shown in Fig. 3, \\(\\mathbf{X}_m^{2,1}\\) and \\(\\mathbf{X}^{1,1}\\) are supposed to match perfectly in 3D space on the premise of neglecting occluded regions. We argue that the set of decoder tokens from the second branch preserves abundant matching information with iterative cross-attentions, so we introduce a matching head with the same architecture of \\(\\mathrm{Head}_1\\) and \\(\\mathrm{Head}_2\\). The supervision for pointmap matching \\(\\mathbf{X}_m^{2,1}\\) still follows the 3D regression loss which is defined as the Euclidean distance:" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.838, + 0.482, + 0.871 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {m}} = \\left\\| \\frac {1}{z _ {m}} \\mathbf {X} _ {m} ^ {2, 1} - \\frac {1}{\\bar {z} _ {m}} \\bar {\\mathbf {X}} _ {m} ^ {2, 1} \\right\\|, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.902 + ], + "angle": 0, + "content": "where \\(\\bar{\\mathbf{X}}_m^{2,1}\\) is the ground truth pointmap matching, which can be obtained following Eq. 2 on the 2D tracking dataset" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.318, + 0.907, + 0.379 + ], + "angle": 0, + "content": "with the depth and camera information. \\(z_{m},\\bar{z}_{m}\\) are the same norm factor defined in DUSt3R. The matching confidence \\(\\mathbf{C}_m^{2,1}\\) is also learned following the confidence loss for \\(\\mathrm{Head}_1\\) and \\(\\mathrm{Head}_2\\) within valid regions:" + }, + { + "type": "equation", + "bbox": [ + 0.608, + 0.391, + 0.905, + 0.41 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {m c o n f}} = \\mathbf {C} _ {m} ^ {2, 1} \\mathcal {L} _ {\\mathrm {m}} - \\alpha \\log \\mathbf {C} _ {m} ^ {2, 1} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.411, + 0.906, + 0.487 + ], + "angle": 0, + "content": "The final loss \\(\\mathcal{L}\\) of our POMATO for pairwise input is a combination of predefined DUSt3R loss \\(\\mathcal{L}_{\\mathrm{DUSt3R}}\\), matching loss \\(\\mathcal{L}_{\\mathrm{m}}\\), and matching confidence loss \\(\\mathcal{L}_{\\mathrm{mconf}}\\). When training our POMATO for pairwise input images at the first stage, the parameters in the encoder are frozen." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.495, + 0.756, + 0.51 + ], + "angle": 0, + "content": "3.3. 
Dynamic Mask Estimation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.517, + 0.907, + 0.64 + ], + "angle": 0, + "content": "Taking advantage of the explicit pointmap matching head, our POMATO can directly perform dynamic mask estimation without introducing an assistant module such as the optical flow model, getting rid of the additional computation cost and the potential domain gap. For an image pair \\(\\{\\mathbf{I}^i,\\mathbf{I}^j\\}\\) along with the estimation of \\(\\mathbf{X}^{j,i}\\) from \\(\\mathrm{Head}_2\\) and \\(\\mathbf{X}_{m}^{j,i}\\) from \\(\\mathrm{Head}_3\\), the dynamic mask \\(\\mathbf{D}^{j,i}\\) can be obtained by comparing the difference between \\(\\mathbf{X}^{j,i}\\) and \\(\\mathbf{X}_{m}^{j,i}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.615, + 0.647, + 0.905, + 0.665 + ], + "angle": 0, + "content": "\\[\n\\mathbf {D} ^ {j, i} = \\left| \\left| \\mathbf {X} _ {m} ^ {j, i} - \\mathbf {X} ^ {j, i} \\right| \\right| > \\alpha , \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.674, + 0.907, + 0.78 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is a dynamic threshold defined as \\(3 \\times\\) median \\((\\|\\mathbf{X}_m^{j,i} - \\mathbf{X}^{j,i}\\|)\\). The explicit dynamic mask can be incorporated into the global alignment process to minimize the interference of moving objects for pose estimation and 3D reconstruction. Details on the incorporation of dynamic masks for global alignment are provided in the supplementary materials." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.788, + 0.749, + 0.804 + ], + "angle": 0, + "content": "3.4. Temporal Motion Module" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.81, + 0.906, + 0.901 + ], + "angle": 0, + "content": "With the fundamental capability of geometric estimation and pointmap matching for pairwise images, we follow [6] and extend our POMATO to 4D video sequences by inserting a transformer-based motion module into the vanilla DPT head to construct the \"temporal DPT head\", which is illustrated in Fig.4. For a set of decoder tokens \\(\\mathbf{G} \\in \\mathbb{R}^{B,T,N,C}\\)" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.132, + 0.088, + 0.442, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.297, + 0.484, + 0.34 + ], + "angle": 0, + "content": "Figure 4. Architecture of our temporal motion module. We insert a transformer-based motion module (in shallow yellow) into the vanilla DPT [33] head to enhance the temporal consistency." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.351, + 0.484, + 0.473 + ], + "angle": 0, + "content": "where \\(B, T, N, C\\) represent the batch size, window length of a video sequence, token number, and token dimension, respectively, we merge the token number dimension into the batch axis and apply the motion module which consists of two blocks of standard multi-head self-attention modules and feed-forward networks along the temporal dimension \\(T\\). To reduce the computation cost, the temporal motion modules are applied to features of low resolution." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.482, + 0.353, + 0.499 + ], + "angle": 0, + "content": "3.5. Downstream Temporal Tasks" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.504, + 0.484, + 0.641 + ], + "angle": 0, + "content": "Given a video sequence of \\(T\\) frames \\(\\mathbf{I}^{t_1},\\mathbf{I}^{t_2},\\ldots ,\\mathbf{I}^{t_T}\\), we construct a unique set of stereo image pairs for each task. 
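Before detailing those pipelines, a minimal PyTorch sketch of one temporal block of the motion module from Sec. 3.4 may help: the token axis is folded into the batch so that self-attention operates only along the temporal dimension \(T\). Layer sizes and the pre-norm residual layout are assumptions, and the paper stacks two such blocks.

```python
import torch
import torch.nn as nn

class TemporalMotionBlock(nn.Module):
    """One self-attention + FFN block applied along the temporal axis
    (a sketch; the hidden sizes here are assumptions)."""
    def __init__(self, dim: int = 768, heads: int = 8):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.ffn = nn.Sequential(nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim))

    def forward(self, g: torch.Tensor) -> torch.Tensor:
        # g: (B, T, N, C) decoder tokens; fold the token axis N into the batch
        B, T, N, C = g.shape
        x = g.permute(0, 2, 1, 3).reshape(B * N, T, C)  # attend over T only
        h = self.norm1(x)
        x = x + self.attn(h, h, h, need_weights=False)[0]
        x = x + self.ffn(self.norm2(x))
        return x.reshape(B, N, T, C).permute(0, 2, 1, 3)
```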
As illustrated in Fig. 5, the flexible construction of input pairs—combined with the proposed temporal motion module and pointmap matching head—enables POMATO to seamlessly address downstream temporal tasks, including 3D point tracking, video depth estimation, and 3D reconstruction. The keyframe selection strategy and input formulation for each task are detailed in the following section." }, { "type": "text", "bbox": [ 0.09, 0.642, 0.484, 0.7 ], "angle": 0, "content": "In addition to the default regression losses for \(\mathrm{Head}_1\) and \(\mathrm{Head}_2\), and the predefined losses in Eq. 3 and Eq. 4 for \(\mathrm{Head}_3\), we further employ a temporal consistency loss, \(\mathcal{L}_{\mathrm{t}}\), which is described in detail below." }, { "type": "text", "bbox": [ 0.09, 0.762, 0.484, 0.902 ], "angle": 0, "content": "3D Point Tracking. As illustrated at the top of Fig. 5, the keyframe is set to the first image of the global video sequence and fed to the proposed \(\mathrm{Head}_3\) to obtain the pointmap matching result of each query point (initialized at the first image) under the coordinate system of each reference frame \(\{\mathbf{X}_{m}^{t_{1},t_{1}},\mathbf{X}_{m}^{t_{1},t_{2}},\mathbf{X}_{m}^{t_{1},t_{3}},\dots \mathbf{X}_{m}^{t_{1},t_{T}}\}\), while the set of reference frames \(\{\mathbf{I}^{t_1},\mathbf{I}^{t_2},\mathbf{I}^{t_3},\dots \mathbf{I}^{t_T}\}\) is fed to \(\mathrm{Head}_1\) to obtain the pointmap under each ego coordinate system. The dense tracking results can be further sparsified by indexing the 2D coordinates. When running inference on a video" }, { "type": "image", "bbox": [ 0.52, 0.092, 0.907, 0.407 ], "angle": 0, "content": null }, { "type": "image_caption", "bbox": [ 0.513, 0.418, 0.907, 0.474 ], "angle": 0, "content": "Figure 5. Inference pipelines for point tracking, video depth, and multi-view reconstruction. \( t_k \) indicates the keyframe. With the help of the motion module and flexible input construction, POMATO can be easily applied to downstream temporal tasks." }, { "type": "text", "bbox": [ 0.512, 0.486, 0.907, 0.543 ], "angle": 0, "content": "longer than \(T\) frames, a simple sliding-window approach with an overlap of four frames is adopted to enhance the consistency between adjacent video windows. 
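A sketch of the window construction implied here: the paper states only the window length \(T = 12\) and the four-frame overlap, so the exact boundary handling below is an assumption.

```python
def sliding_windows(num_frames: int, T: int = 12, overlap: int = 4):
    """Frame-index windows of length T with a 4-frame overlap between
    adjacent windows (boundary handling is an assumption of this sketch)."""
    if num_frames <= T:
        return [list(range(num_frames))]
    stride = T - overlap
    starts = sorted(set(list(range(0, num_frames - T, stride)) + [num_frames - T]))
    return [list(range(s, s + T)) for s in starts]
```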
The temporal consistency loss \(\mathcal{L}_{\mathrm{t}}\) for tracking is:" }, { "type": "equation", "bbox": [ 0.529, 0.553, 0.906, 0.59 ], "angle": 0, "content": "\[
\mathcal {L} _ {\mathrm {t}} = \frac {1}{T} \sum_ {i = 1} ^ {T} \left\| \frac {\mathbf {X} _ {m} ^ {t _ {1} , t _ {i}}}{z _ {m} ^ {T}} - \frac {\bar {\mathbf {X}} _ {m} ^ {t _ {1} , t _ {i}}}{\bar {z} _ {m} ^ {T}} \right\| + \left\| \frac {\mathbf {X} ^ {t _ {i} , t _ {i}}}{z ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\bar {z} ^ {T}} \right\|, \tag {6}
\]" }, { "type": "text", "bbox": [ 0.512, 0.6, 0.906, 0.645 ], "angle": 0, "content": "where \(z_{m}^{T} = \mathrm{norm}\left(\mathbf{X}_{m}^{t_{1},t_{1}},\mathbf{X}_{m}^{t_{1},t_{2}},\dots,\mathbf{X}_{m}^{t_{1},t_{T}}\right)\) and \(\bar{z}_{m}^{T} = \mathrm{norm}\left(\bar{\mathbf{X}}_{m}^{t_{1},t_{1}},\bar{\mathbf{X}}_{m}^{t_{1},t_{2}},\dots,\bar{\mathbf{X}}_{m}^{t_{1},t_{T}}\right)\); \(z^{T}\) and \(\bar{z}^{T}\) are defined analogously over \(\mathbf{X}^{t_{i},t_{i}}\) and \(\bar{\mathbf{X}}^{t_{i},t_{i}}\)." }, { "type": "text", "bbox": [ 0.512, 0.645, 0.906, 0.743 ], "angle": 0, "content": "Video Depth Estimation. As shown in the middle part of Fig. 5, the input video sequence is formulated as a set of identical image pairs \(\{(\mathbf{I}^{t_1},\mathbf{I}^{t_1}),(\mathbf{I}^{t_2},\mathbf{I}^{t_2}),\dots,(\mathbf{I}^{t_T},\mathbf{I}^{t_T})\}\) and fed to \(\mathrm{Head}_1\) and \(\mathrm{Head}_2\), where the predictions from each head are identical: \(\{\mathbf{X}^{t_1,t_1},\mathbf{X}^{t_2,t_2},\dots,\mathbf{X}^{t_T,t_T}\}\). We use the output of \(\mathrm{Head}_1\) as our final video depth estimation. The temporal consistency loss \(\mathcal{L}_{\mathrm{t}}\) is defined as:" }, { "type": "equation", "bbox": [ 0.53, 0.753, 0.907, 0.79 ], "angle": 0, "content": "\[
\mathcal {L} _ {\mathrm {t}} = \frac {1}{T} \sum_ {i = 1} ^ {T} \left\| \frac {\mathbf {X} _ {1} ^ {t _ {i} , t _ {i}}}{z _ {1} ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\bar {z} ^ {T}} \right\| + \left\| \frac {\mathbf {X} _ {2} ^ {t _ {i} , t _ {i}}}{z _ {2} ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\bar {z} ^ {T}} \right\|, \tag {7}
\]" }, { "type": "text", "bbox": [ 0.513, 0.8, 0.906, 0.831 ], "angle": 0, "content": "where \(\mathbf{X}_1^{t_i,t_i}\) and \(\mathbf{X}_2^{t_i,t_i}\) indicate the outputs from \(\mathrm{Head}_1\) and \(\mathrm{Head}_2\), respectively. \(\bar{\mathbf{X}}^{t_i,t_i}\) is the ground-truth pointmap." }, { "type": "text", "bbox": [ 0.512, 0.831, 0.906, 0.901 ], "angle": 0, "content": "3D Reconstruction. Assisted by the temporal motion module, redundant post-processing operations such as global alignment can be omitted, allowing the reconstructed 3D point cloud to be obtained in a feed-forward manner. As shown in the bottom part of Fig. 5, the keyframe is set to the last frame" } ], [ { "type": "table", "bbox": [ 0.133, 0.089, 0.861, 0.276 ], "angle": 0, "content": "
<table>
<tr><td rowspan="2">Alignment</td><td rowspan="2">Method</td><td rowspan="2">Optim.</td><td rowspan="2">Onl.</td><td colspan="2">Sintel [4]</td><td colspan="2">BONN [30]</td><td colspan="2">KITTI [13]</td></tr>
<tr><td>Abs Rel ↓</td><td>δ&lt;1.25 ↑</td><td>Abs Rel ↓</td><td>δ&lt;1.25 ↑</td><td>Abs Rel ↓</td><td>δ&lt;1.25 ↑</td></tr>
<tr><td rowspan="6">Per-sequence scale</td><td>DUSt3R-GA [44]</td><td></td><td></td><td>0.656</td><td>45.2</td><td>0.155</td><td>83.3</td><td>0.144</td><td>81.3</td></tr>
<tr><td>MASt3R-GA [26]</td><td></td><td></td><td>0.641</td><td>43.9</td><td>0.252</td><td>70.1</td><td>0.183</td><td>74.5</td></tr>
<tr><td>MonST3R-GA [52]</td><td></td><td></td><td>0.378</td><td>55.8</td><td>0.067</td><td>96.3</td><td>0.168</td><td>74.4</td></tr>
<tr><td>Spann3R [41]</td><td></td><td></td><td>0.622</td><td>42.6</td><td>0.144</td><td>81.3</td><td>0.198</td><td>73.7</td></tr>
<tr><td>CUT3R [43]</td><td></td><td></td><td>0.421</td><td>47.9</td><td>0.078</td><td>93.7</td><td>0.118</td><td>88.1</td></tr>
<tr><td>POMATO</td><td></td><td></td><td>0.416</td><td>53.6</td><td>0.074</td><td>96.1</td><td>0.085</td><td>93.3</td></tr>
<tr><td rowspan="3">Per-sequence scale &amp; shift</td><td>MonST3R-GA [52]</td><td></td><td></td><td>0.335</td><td>58.5</td><td>0.063</td><td>96.4</td><td>0.104</td><td>89.5</td></tr>
<tr><td>CUT3R [43]</td><td></td><td></td><td>0.466</td><td>56.2</td><td>0.111</td><td>88.3</td><td>0.075</td><td>94.3</td></tr>
<tr><td>POMATO</td><td></td><td></td><td>0.345</td><td>57.9</td><td>0.072</td><td>96.5</td><td>0.084</td><td>93.4</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.281, + 0.907, + 0.325 + ], + "angle": 0, + "content": "Table 1. Video depth evaluation. We report scale-invariant depth and scale & shift invariant depth accuracy on Sintel [4], Bonn [30], and KITTI [13] datasets. Methods requiring global alignment are marked “GA”, while “Optim.” and “Onl.” indicate optimization-based and online methods, respectively. The best and second best results in each category are bold and underlined, respectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.35, + 0.483, + 0.435 + ], + "angle": 0, + "content": "\\(\\mathbf{I}^{t_T}\\) within the temporal window of length \\(T\\) and is fed to \\(\\mathrm{Head}_1\\) with a set output of \\(\\{\\mathbf{X}^{t_T,t_T},\\mathbf{X}^{t_T,t_T},\\dots,\\mathbf{X}^{t_T,t_T}\\}\\). All the reference frames are input to the \\(\\mathrm{Head}_2\\) so the target pointmaps \\(\\{\\mathbf{X}^{t_1,t_T},\\mathbf{X}^{t_2,t_T},\\dots,\\mathbf{X}^{t_T,t_T}\\}\\) are aligned under the coordinate system of the keyframe. The temporal consistency loss \\(\\mathcal{L}_{\\mathrm{t}}\\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.446, + 0.483, + 0.481 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {t}} = \\frac {1}{T} \\sum_ {i = 1} ^ {T} \\left\\| \\frac {\\mathbf {X} ^ {t _ {T} , t _ {T}}}{z _ {1} ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {T} , t _ {T}}}{\\bar {z} _ {1} ^ {T}} \\right\\| + \\left\\| \\frac {\\mathbf {X} ^ {t _ {i} , t _ {T}}}{z _ {2} ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {i} , t _ {T}}}{\\bar {z} _ {2} ^ {T}} \\right\\| \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.491, + 0.484, + 0.567 + ], + "angle": 0, + "content": "We further freeze the parameters in Decoder1 and Decoder2 when training the temporal downstream tasks at the second stage. In our work, the temporal window length \\( T \\) is set to 12. Additional explorations on the temporal length can be found in Sec.4." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.582, + 0.224, + 0.599 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.607, + 0.291, + 0.623 + ], + "angle": 0, + "content": "4.1. Experimental Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.629, + 0.483, + 0.856 + ], + "angle": 0, + "content": "Training data. We train our network with a mixture of five datasets: PointOdyssey [54], Tartanair [45], ParallelDomain4D [40], DynamicReplica [21] and Carla (0.9.15) [10]. The specific number and the usage ratio of each dataset can be found in the supplementary materials. All datasets include pixel-accurate ground truth depth, as well as camera intrinsics and extrinsics, and encompass a wide variety of dynamic scenes across both indoor and outdoor environments. Among them, PointOdyssey and DynamicReplica have additional 2D trajectory annotations for dynamic objects which can be used to construct pointmap matching ground truth following Eq. 2. All datasets are used to supervise geometry learning on \\(\\mathrm{Head}_1\\) and \\(\\mathrm{Head}_2\\), while only PointOdyssey, DynamicReplica, and TartanAir are used to train the proposed pointmap matching head." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Training and inference details. 
Our model architecture is based on the publicly available DUSt3R [44] model, utilizing the same backbone consisting of a ViT-Large encoder" }, { "type": "text", "bbox": [ 0.512, 0.35, 0.907, 0.593 ], "angle": 0, "content": "and a ViT-Base decoder. To fully leverage MonST3R's geometry estimation capabilities in dynamic scenes, we initialize our model using the publicly available MonST3R checkpoint. For the newly introduced pointmap matching head, we initialize its weights from the pretrained \(\mathrm{Head}_2\) weights of MonST3R. The temporal motion module is initialized following [14]. We train our network for 10 epochs with a cosine learning rate schedule and an initial learning rate of 1e-4. In the first stage, which involves pairwise training, we use a batch size of 16 on 4 A100 GPUs (40G). In the second stage, where the temporal motion module is introduced, the batch size is set to 4 with a fixed temporal window length of 12. During each training iteration, we randomly sample a downstream task (3D point tracking, video depth estimation, or 3D reconstruction) to construct the input pairs and apply the corresponding loss function." }, { "type": "title", "bbox": [ 0.513, 0.606, 0.734, 0.621 ], "angle": 0, "content": "4.2. Video Depth Estimation" }, { "type": "text", "bbox": [ 0.512, 0.629, 0.907, 0.901 ], "angle": 0, "content": "Following MonST3R [52] and CUT3R [43], we rescale all predictions from the same video to align them by conducting two forms of alignment: per-sequence scale-and-shift alignment, and per-sequence scale alignment. Thus, we can measure both per-frame depth quality and inter-frame depth consistency. We employ our proposed motion module for video depth estimation in a feed-forward manner as described in Sec. 3.5 and compare our method against several variants of DUSt3R, including DUSt3R [44], MASt3R [26], MonST3R [52], Spann3R [41], and CUT3R [43]. Given 6 frames of \(288 \times 512\) on an NVIDIA 4070 GPU, POMATO reconstructs the 3D point cloud in 0.7 seconds, whereas global alignment-based methods such as MonST3R require 5.8 seconds. As shown in Tab. 1, our method demonstrates comparable performance to the global alignment (GA)-based MonST3R [52] on the Sintel [4] and BONN [30] datasets, while surpassing it on the KITTI dataset. Besides, we" } ], [ { "type": "table", "bbox": [ 0.207, 0.094, 0.788, 0.203 ], "angle": 0, "content": "
<table>
<tr><td rowspan="2">Method</td><td colspan="2">PointOdyssey [54]</td><td colspan="2">ADT [31]</td><td colspan="2">PStudio [20]</td><td colspan="2">Average</td></tr>
<tr><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td></tr>
<tr><td>SpatialTracker* [47]</td><td>20.46</td><td>20.71</td><td>21.64</td><td>20.67</td><td>30.41</td><td>25.87</td><td>24.17</td><td>22.42</td></tr>
<tr><td>DUSt3R [44]</td><td>19.03</td><td>19.03</td><td>29.02</td><td>25.55</td><td>9.72</td><td>6.50</td><td>19.26</td><td>17.03</td></tr>
<tr><td>MASt3R [26]</td><td>16.58</td><td>17.35</td><td>27.36</td><td>26.46</td><td>11.78</td><td>8.09</td><td>18.57</td><td>17.30</td></tr>
<tr><td>MonST3R [52]</td><td>27.31</td><td>27.92</td><td>28.30</td><td>26.13</td><td>16.50</td><td>11.06</td><td>24.03</td><td>21.70</td></tr>
<tr><td>POMATO</td><td>33.20</td><td>33.58</td><td>31.57</td><td>28.22</td><td>24.59</td><td>19.79</td><td>29.79</td><td>27.20</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.209, + 0.907, + 0.237 + ], + "angle": 0, + "content": "Table 2. 3D tracking evaluation. We report the APD metric to evaluate 3D point tracking on the PointOdyssey [54], ADT [31], and PStudio [20] datasets. L-12 and L-24 indicate tracking within the temporal length of 12 frames and 24 frames, respectively." + }, + { + "type": "image", + "bbox": [ + 0.113, + 0.25, + 0.889, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.513, + 0.907, + 0.542 + ], + "angle": 0, + "content": "Figure 6. Qualitative comparison of dynamic scenes. Compared to MonST3R, our POMATO can provide more reliable motion masks, 3D point tracking, and reconstruction performance." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.568, + 0.483, + 0.645 + ], + "angle": 0, + "content": "consistently outperform the state-of-the-art online method, CUT3R [43], across various settings. These results underscore the effectiveness of our approach, specifically (1) the joint learning of geometry and pointmap matching, and (2) the temporal motion module." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.652, + 0.27, + 0.669 + ], + "angle": 0, + "content": "4.3. 3D Point Tracking" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.675, + 0.484, + 0.901 + ], + "angle": 0, + "content": "For 3D point tracking task, we use the Aria Digital Twin (ADT) [31], and Panoptic Studio (PStudio) [20] benchmarks from the TAPVid-3D [25] dataset along with the validation set on the PointOdyssey [54] dataset. We report the Average Percent Deviation (APD) metric, which quantifies the average percentage of points within a threshold relative to the ground truth depth. The APD metric serves as a direct measure of the accuracy of the predicted tracking. We reformulate the datasets and project all the query points within a temporal window to the first frame. We report tracking results on the length of 12 and 24 frames. As shown in Tab.2, our POMATO achieves the best performance on both PointOdyssey and ADT datasets. It's worth mentioning that SpatialTracker [47] is a state-of-the-art network tailored for 3D point tracking with ground truth camera intrinsic as ad" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.568, + 0.907, + 0.676 + ], + "angle": 0, + "content": "ditional input data. POMATO surpasses it on two datasets and improves the average APD metric by \\(23.3\\%\\) and \\(21.4\\%\\) for 12 frames and 24 frames, respectively. For DUSt3R-based methods, we use the output of \\(\\mathrm{Head}_2\\) as tracking results. Obviously, the ambiguous matching representation limits its capability to handle this fine-grained 3D reconstruction task in dynamic scenes." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.683, + 0.741, + 0.698 + ], + "angle": 0, + "content": "4.4. Camera Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.704, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Following DUSt3R-based methods, we perform global alignment with the model trained in the first stage on the Bonn [30] and TUM [37] datasets. The sampling stride is set to 5 for the Bonn dataset and 3 for the TUM dataset. Compared with optical-flow assisted global alignment in MonST3R, the dynamic mask is computed according to Eq. 5 while the 2D pseudo label is replaced by projecting the pointmap matching results to 2D coordinates with estimated camera intrinsic. 
Absolute Translation Error (ATE), Relative Translation Error (RPE trans), and Relative Rotation Error (RPE rot) are used as metrics. The evaluation results over 40 frames are reported in Tab. 4. Notably, POMATO obtains overall state-of-the-art performance and signifi" } ], [ { "type": "table", "bbox": [ 0.176, 0.089, 0.825, 0.187 ], "angle": 0, "content": "
<table>
<tr><td rowspan="3">Temporal Length</td><td colspan="6">Video Depth</td><td colspan="3">Tracking (12 Frames)</td></tr>
<tr><td colspan="2">Sintel [4]</td><td colspan="2">Bonn [30]</td><td colspan="2">KITTI [13]</td><td>PointOdyssey [54]</td><td>ADT [31]</td><td>PStudio [20]</td></tr>
<tr><td>Abs Rel ↓</td><td>δ&lt;1.25 ↑</td><td>Abs Rel ↓</td><td>δ&lt;1.25 ↑</td><td>Abs Rel ↓</td><td>δ&lt;1.25 ↑</td><td>APD ↑</td><td>APD ↑</td><td>APD ↑</td></tr>
<tr><td>Pair-wise</td><td>0.548</td><td>46.2</td><td>0.087</td><td>94.0</td><td>0.113</td><td>89.5</td><td>32.06</td><td>29.87</td><td>23.10</td></tr>
<tr><td>6 frames</td><td>0.436</td><td>51.3</td><td>0.076</td><td>95.9</td><td>0.085</td><td>93.5</td><td>32.69</td><td>30.93</td><td>24.52</td></tr>
<tr><td>12 frames</td><td>0.416</td><td>53.6</td><td>0.075</td><td>96.1</td><td>0.086</td><td>93.3</td><td>33.20</td><td>31.57</td><td>24.59</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.198, + 0.907, + 0.227 + ], + "angle": 0, + "content": "Table 3. Ablation study on the temporal motion module. The introduction of the temporal motion module brings a significant improvement. As the temporal window length enlarges from 6 frames to 12 frames, we obtain an overall consistent improvement." + }, + { + "type": "table", + "bbox": [ + 0.106, + 0.25, + 0.472, + 0.338 + ], + "angle": 0, + "content": "
<table>
<tr><td rowspan="2">Method</td><td colspan="3">TUM [37]</td><td colspan="3">Bonn [30]</td></tr>
<tr><td>ATE ↓</td><td>RPE trans ↓</td><td>RPE rot ↓</td><td>ATE ↓</td><td>RPE trans ↓</td><td>RPE rot ↓</td></tr>
<tr><td>DUSt3R [44]</td><td>0.025</td><td>0.013</td><td>2.361</td><td>0.030</td><td>0.025</td><td>2.522</td></tr>
<tr><td>MASt3R [26]</td><td>0.027</td><td>0.015</td><td>1.910</td><td>0.031</td><td>0.025</td><td>2.478</td></tr>
<tr><td>MonST3R [52]</td><td>0.021</td><td>0.006</td><td>1.142</td><td>0.025</td><td>0.021</td><td>2.120</td></tr>
<tr><td>CUT3R [43]</td><td>0.023</td><td>0.016</td><td>0.510</td><td>0.028</td><td>0.033</td><td>2.569</td></tr>
<tr><td>POMATO</td><td>0.020</td><td>0.010</td><td>0.509</td><td>0.037</td><td>0.016</td><td>1.782</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.348, + 0.483, + 0.378 + ], + "angle": 0, + "content": "Table 4. Pose estimation. Our method achieves an overall best performance and improves the RPE rot metric significantly." + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.392, + 0.221, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.446, + 0.22, + 0.498 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.5, + 0.22, + 0.552 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.128, + 0.553, + 0.197, + 0.565 + ], + "angle": 0, + "content": "Input Images" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.393, + 0.472, + 0.46 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.46, + 0.472, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.248, + 0.549, + 0.451, + 0.559 + ], + "angle": 0, + "content": "3D Reconstruction with our Pointmap Matching." + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.579, + 0.483, + 0.622 + ], + "angle": 0, + "content": "Figure 7. Effectiveness of our pointmap matching head. Without explicitly filtering out the motion area, both pose and geometry estimation will be degraded." + }, + { + "type": "table", + "bbox": [ + 0.106, + 0.638, + 0.47, + 0.693 + ], + "angle": 0, + "content": "
<table>
<tr><td rowspan="2">Method</td><td colspan="3">Bonn [30]</td><td>PointOdyssey [54]</td><td>ADT [31]</td><td>PStudio [20]</td></tr>
<tr><td>ATE ↓</td><td>RPE trans ↓</td><td>RPE rot ↓</td><td>APD ↑</td><td>APD ↑</td><td>APD ↑</td></tr>
<tr><td>W/O Head3</td><td>0.040</td><td>0.015</td><td>1.721</td><td>29.10</td><td>29.62</td><td>16.94</td></tr>
<tr><td>W/ Head3</td><td>0.037</td><td>0.016</td><td>1.782</td><td>32.06</td><td>29.87</td><td>23.10</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.703, + 0.483, + 0.745 + ], + "angle": 0, + "content": "Table 5. Ablation study on the effectiveness of the pointmap matching head. The comparisons are reported on the pose estimation and 3D point tracking tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.774, + 0.483, + 0.805 + ], + "angle": 0, + "content": "cantly improves the RPE-rot metric, surpassing MonST3R by \\(55.4\\%\\) and \\(13.3\\%\\) on the TUM and Bonn datasets." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.818, + 0.245, + 0.833 + ], + "angle": 0, + "content": "4.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.484, + 0.902 + ], + "angle": 0, + "content": "We conduct extensive ablation studies to evaluate the effectiveness of the temporal motion module and the proposed pointmap matching head. As shown in Table 3, we report results for three models: one trained with only pairwise" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.253, + 0.908, + 0.587 + ], + "angle": 0, + "content": "images (first-stage training), one using a shorter temporal window of 6 frames, and another using the default temporal window length of 12 frames. Incorporating temporal consistency yields substantial improvements across all datasets for video depth estimation and 3D point tracking. Further improvement is achieved when the temporal window length increases from 6 frames to 12 frames. In Table 5, we evaluate the effectiveness of the pointmap matching head. While it introduces only a modest improvement in the ATE metric, we attribute this to the limited motion and minimal viewpoint variation in the indoor evaluation dataset. As illustrated in Fig. 7, under challenging in-the-wild conditions with significant motion and rapid viewpoint changes, removing the pointmap matching head introduces ambiguity in explicit rigid transformation estimation, resulting in a clear degradation in performance. To further demonstrate the impact of the pointmap matching head on 3D point tracking, we conduct tracking experiments over 12 frames using the pairwise input setup. Clearly, removing the pointmap matching head (using only \\(\\mathrm{Head}_2\\)) leads to an inevitable performance drop, emphasizing explicit correspondence modeling for reliable long-term tracking." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.599, + 0.763, + 0.615 + ], + "angle": 0, + "content": "5. Discussion and Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.624, + 0.907, + 0.85 + ], + "angle": 0, + "content": "We introduce POMATO, a unified framework for geometry estimation and motion understanding in dynamic scenes. By leveraging the proposed pointmap matching head, our method effectively distinguishes moving regions, thereby mitigating the interference introduced by dynamic objects. The temporal motion module further facilitates the learning of temporal dynamics across frames, enhancing scale consistency and improving performance in tasks where both geometry and matching are critical—most notably, 3D point tracking. The downstream temporal tasks including 3D point tracking, video depth estimation, and 3D reconstruction can be easily applied in a feed-forward manner. In future work, we plan to scale up training with more dynamic reconstruction and matching datasets to further enhance 3D reconstruction and tracking performance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.851, + 0.906, + 0.896 + ], + "angle": 0, + "content": "Acknowledgement. 
This work was supported by the National Natural Science Foundation of China (No. 62206244)" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.09, + 0.188, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.115, + 0.484, + 0.17 + ], + "angle": 0, + "content": "[1] Daniel Barath, Dmytro Mishkin, Luca Cavalli, Paul-Edouard Sarlin, Petr Hruby, and Marc Pollefeys. Affineglue: Joint matching and robust estimation. arXiv preprint arXiv:2307.15381, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.171, + 0.484, + 0.253 + ], + "angle": 0, + "content": "[2] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, Varun Jampani, and Robin Rombach. Stable video diffusion: Scaling latent video diffusion models to large datasets. abs/2311.15127, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.256, + 0.483, + 0.31 + ], + "angle": 0, + "content": "[3] Aleksei Bochkovskii, Amael Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R. Richter, and Vladlen Koltun. Depth pro: Sharp monocular metric depth in less than a second. arXiv, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.312, + 0.483, + 0.365 + ], + "angle": 0, + "content": "[4] Daniel J. Butler, Jonas Wulff, Garrett B. Stanley, and Michael J. Black. A naturalistic open source movie for optical flow evaluation. In ECCV, pages 611-625, 2012. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.368, + 0.483, + 0.395 + ], + "angle": 0, + "content": "[5] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. CVPR, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.397, + 0.483, + 0.451 + ], + "angle": 0, + "content": "[6] Sili Chen, Hengkai Guo, Shengnan Zhu, Feihu Zhang, Zi long Huang, Jiashi Feng, and Bingyi Kang. Video depth anything: Consistent depth estimation for super-long videos. arXiv preprint arXiv:2501.12375, 2025. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.453, + 0.483, + 0.479 + ], + "angle": 0, + "content": "[7] Yu Chen, Yisong Chen, and Guoping Wang. Bundle adjustment revisited. arXiv preprint arXiv: 1912.03858, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.481, + 0.483, + 0.548 + ], + "angle": 0, + "content": "[8] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 224-236, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.551, + 0.483, + 0.633 + ], + "angle": 0, + "content": "[9] Carl Doersch, Yi Yang, Mel Vecerik, Dilara Gokay, Ankush Gupta, Yusuf Aytar, Joao Carreira, and Andrew Zisserman. Tapir: Tracking any point with per-frame initialization and temporal refinement. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10061-10072, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.635, + 0.483, + 0.689 + ], + "angle": 0, + "content": "[10] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. Carla: An open urban driving simulator. In Conference on robot learning, pages 1-16. PMLR, 2017. 
6, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.691, + 0.483, + 0.758 + ], + "angle": 0, + "content": "[11] Xiao Fu, Wei Yin, Mu Hu, Kaixuan Wang, Yuexin Ma, Ping Tan, Shaojie Shen, Dahua Lin, and Xiaoxiao Long. Geowizard: Unleashing the diffusion priors for 3d geometry estimation from a single image. arXiv preprint arXiv: 2403.12013, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.761, + 0.483, + 0.802 + ], + "angle": 0, + "content": "[12] Yasutaka Furukawa, Carlos Hernández, et al. Multi-view stereo: A tutorial. Foundations and Trends® in Computer Graphics and Vision, 9(1-2):1-148, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.804, + 0.483, + 0.844 + ], + "angle": 0, + "content": "[13] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In CVPR, pages 3354-3361, 2012. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.846, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[14] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff:Animate your personalized text-to-image diffusion models without specific tuning, 2023.6" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.115, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.135 + ], + "angle": 0, + "content": "[15] Adam W. Harley, Zhaoyuan Fang, and Katerina Fragkiadaki. Particle video revisited: Tracking through occlusions using point trajectories. In ECCV, pages 59-75. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.136, + 0.905, + 0.217 + ], + "angle": 0, + "content": "[16] Mu Hu, Wei Yin, Chi Zhang, Zhipeng Cai, Xiaoxiao Long, Hao Chen, Kaixuan Wang, Gang Yu, Chunhua Shen, and Shaojie Shen. Metric3d v2: A versatile monocular geometric foundation model for zero-shot metric depth and surface normal estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.219, + 0.905, + 0.272 + ], + "angle": 0, + "content": "[17] Wenbo Hu, Xiangjun Gao, Xiaoyu Li, Sijie Zhao, Xiaodong Cun, Yong Zhang, Long Quan, and Ying Shan. Depthcrafter: Generating consistent long depth sequences for open-world videos. In CVPR, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.274, + 0.905, + 0.342 + ], + "angle": 0, + "content": "[18] Mustafa Işık, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.344, + 0.905, + 0.398 + ], + "angle": 0, + "content": "[19] Muhammad Zubair Irshad, Mauro Comi, Yen-Chen Lin, Nick Heppert, Abhinav Valada, Rares Ambrus, Zsolt Kira, and Jonathan Tremblay. Neural fields in robotics: A survey. arXiv preprint arXiv: 2410.20220, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.4, + 0.905, + 0.482 + ], + "angle": 0, + "content": "[20] Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Scott Godisart, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social interaction capture. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017. 
7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.484, + 0.905, + 0.537 + ], + "angle": 0, + "content": "[21] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Dynamic stereo: Consistent dynamic depth from stereo videos. CVPR, 2023. 6, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.539, + 0.905, + 0.591 + ], + "angle": 0, + "content": "[22] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Co-tracker: It is better to track together. In Proc. ECCV, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.595, + 0.905, + 0.675 + ], + "angle": 0, + "content": "[23] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.678, + 0.905, + 0.719 + ], + "angle": 0, + "content": "[24] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM TOG, 42(4):139-1, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.721, + 0.905, + 0.775 + ], + "angle": 0, + "content": "[25] Skanda Koppula, Ignacio Rocco, Yi Yang, Joe Heyward, João Carreira, Andrew Zisserman, Gabriel Brostow, and Carl Doersch. Tapvid-3d: A benchmark for tracking any point in 3d. arXiv preprint arXiv: 2407.05921, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.777, + 0.905, + 0.816 + ], + "angle": 0, + "content": "[26] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. European Conference on Computer Vision, 2024. 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.818, + 0.905, + 0.872 + ], + "angle": 0, + "content": "[27] Philipp Lindenberger, Paul-Edouard Sarlin, and Marc Pollefeys. Lightglue: Local feature matching at light speed. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17627-17638, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.874, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[28] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf:" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.093, + 0.482, + 0.12 + ], + "angle": 0, + "content": "Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.122, + 0.483, + 0.177 + ], + "angle": 0, + "content": "[29] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, and Marc Szafraniec et al. DINOv2: Learning robust visual features without supervision. Trans. Mach. Learn. Research, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.179, + 0.482, + 0.261 + ], + "angle": 0, + "content": "[30] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguere, and Cyril Stachniss. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 7855-7862. IEEE, 2019. 
6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.264, + 0.482, + 0.332 + ], + "angle": 0, + "content": "[31] Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Carl Yuheng Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. arXiv preprint arXiv: 2306.06362, 2023. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.334, + 0.482, + 0.402 + ], + "angle": 0, + "content": "[32] Rene Ranftl, Vibhav Vineet, Qifeng Chen, and Vladlen Koltun. Dense monocular depth estimation in complex dynamic scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4058-4066, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.405, + 0.483, + 0.461 + ], + "angle": 0, + "content": "[33] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF international conference on computer vision, pages 12179-12188, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.462, + 0.482, + 0.517 + ], + "angle": 0, + "content": "[34] Chris Russell, Rui Yu, and Lourdes Agapito. Video popuup: Monocular 3d reconstruction of dynamic scenes. In European conference on computer vision, pages 583-598. Springer, 2014. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.519, + 0.482, + 0.56 + ], + "angle": 0, + "content": "[35] Peter Sand and Seth Teller. Particle video: Long-range motion estimation using point trajectories. International journal of computer vision, 80:72-91, 2008. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.562, + 0.482, + 0.616 + ], + "angle": 0, + "content": "[36] Jiahao Shao, Yuanbo Yang, Hongyu Zhou, Youmin Zhang, Yujun Shen, Matteo Poggi, and Yiyi Liao. Learning temporally consistent video depth from video diffusion priors. abs/2406.01493, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.619, + 0.482, + 0.672 + ], + "angle": 0, + "content": "[37] Jürgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. A benchmark for the evaluation of RGB-D SLAM systems. pages 573-580, 2012. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.676, + 0.482, + 0.743 + ], + "angle": 0, + "content": "[38] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part II 16, pages 402–419. Springer, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.746, + 0.482, + 0.788 + ], + "angle": 0, + "content": "[39] Zachary Teed and Jia Deng. Droid-slam: Deep visual slam for monocular, stereo, and rgb-d cameras. Neural Information Processing Systems, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.789, + 0.482, + 0.857 + ], + "angle": 0, + "content": "[40] Basile Van Hoorick, Rundi Wu, Ege Ozguroglu, Kyle Sargent, Ruoshi Liu, Pavel Tokmakov, Achal Dave, Changxi Zheng, and Carl Vondrick. Generative camera dolly: Extreme monocular dynamic novel view synthesis. arXiv preprint arXiv:2405.14868, 2024. 6, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.859, + 0.482, + 0.899 + ], + "angle": 0, + "content": "[41] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024. 
2, 6" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.483, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.147 + ], + "angle": 0, + "content": "[42] Qianqian Wang, Vickie Ye, Hang Gao, Jake Austin, Zhengqi Li, and Angjoo Kanazawa. Shape of motion: 4d reconstruction from a single video. arXiv preprint arXiv:2407.13764, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.149, + 0.905, + 0.191 + ], + "angle": 0, + "content": "[43] Qianqian Wang, Yifei Zhang, Aleksander Holynski, Alexei A. Efros, and Angjoo Kanazawa. Continuous 3d perception model with persistent state, 2025. 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.193, + 0.905, + 0.247 + ], + "angle": 0, + "content": "[44] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. DUSt3R: Geometric 3D vision made easy. In CVPR, pages 20697-20709, 2024. 2, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.249, + 0.905, + 0.304 + ], + "angle": 0, + "content": "[45] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian Scherer. TartanAir: A dataset to push the limits of visual SLAM. pages 4909-4916, 2020. 6, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.305, + 0.905, + 0.346 + ], + "angle": 0, + "content": "[46] Yihan Wang, Lahav Lipson, and Jia Deng. SEA-RAFT: Simple, efficient, accurate RAFT for optical flow. In ECCV, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.348, + 0.905, + 0.417 + ], + "angle": 0, + "content": "[47] Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. Spatialtracker: Tracking any 2d pixels in 3d space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.419, + 0.905, + 0.473 + ], + "angle": 0, + "content": "[48] Guangkai Xu, Yongtao Ge, Mingyu Liu, Chengxiang Fan, Kangyang Xie, Zhiyue Zhao, Hao Chen, and Chunhua Shen. Diffusion models trained with large data are transferable visual models. arXiv preprint arXiv: 2403.06090, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.476, + 0.905, + 0.53 + ], + "angle": 0, + "content": "[49] Yueming Xu, Haochen Jiang, Zhongyang Xiao, Jianfeng Feng, and Li Zhang. Dg-slam: Robust dynamic gaussian splatting slam with hybrid pose optimization. arXiv preprint arXiv: 2411.08373, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.532, + 0.905, + 0.587 + ], + "angle": 0, + "content": "[50] Honghui Yang, Di Huang, Wei Yin, Chunhua Shen, Haifeng Liu, Xiaofei He, Binbin Lin, Wanli Ouyang, and Tong He. Depth any video with scalable synthetic data. arXiv preprint arXiv:2410.10815, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.589, + 0.905, + 0.63 + ], + "angle": 0, + "content": "[51] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. arXiv:2406.09414, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.632, + 0.905, + 0.699 + ], + "angle": 0, + "content": "[52] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. Monst3r: A simple approach for estimating geometry in the presence of motion. arXiv preprint arxiv:2410.03825, 2024. 
2, 3, 6, 7, 8, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.702, + 0.905, + 0.77 + ], + "angle": 0, + "content": "[53] Guosheng Zhao, Chaojun Ni, Xiaofeng Wang, Zheng Zhu, Xueyang Zhang, Yida Wang, Guan Huang, Xinze Chen, Boyuan Wang, Youyi Zhang, Wenjun Mei, and Xingang Wang. Drivedreamer4d: World models are effective data machines for 4d driving scene representation. 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.772, + 0.905, + 0.827 + ], + "angle": 0, + "content": "[54] Yang Zheng, Adam W. Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J. Guibas. PointOdyssey: A large-scale synthetic dataset for long-term point tracking. In ICCV, 2023. 6, 7, 8, 2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.827 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.167, + 0.086, + 0.833, + 0.131 + ], + "angle": 0, + "content": "POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.142, + 0.615, + 0.163 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.179, + 0.476, + 0.196 + ], + "angle": 0, + "content": "A. Pointmap Matching for Global Alignment." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.207, + 0.485, + 0.466 + ], + "angle": 0, + "content": "Given a sequence of video frames, the target of global alignment is to project all pairwise estimated pointmaps to the same global world coordinates. DUSt3R constructs a connectivity pairwise graph and aims to minimize the reprojection error for each image pair globally where the dynamic regions are supposed to be separated from the static regions. To this end, MonST3R [52] further introduces an assistant optical flow network [46] to help mask the dynamic regions and provide a pseudo label of 2D matching for minimizing the re-projection error in static regions. However, the introduced assistant model will introduce inevitable domain gaps and additional computation costs. Besides, the optical flow model is tailored for matching within two adjacent frames, suffering an obvious degeneration with the large view displacement. In POMATO, for an image pair \\(\\{\\mathbf{I}^i,\\mathbf{I}^j\\}\\), the dynamic mask \\(\\mathbf{D}^{j,i}\\) is calculated by comparing the difference between \\(\\mathbf{X}^{j,i}\\) and \\(\\mathbf{X}_m^{j,i}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.48, + 0.483, + 0.498 + ], + "angle": 0, + "content": "\\[\n\\mathbf {D} ^ {j, i} = \\left| \\left| \\mathbf {X} _ {m} ^ {j, i} - \\mathbf {X} ^ {j, i} \\right| \\right| > \\alpha , \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.515, + 0.483, + 0.545 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is a dynamic threshold defined as \\(3 \\times\\) median \\((\\|\\mathbf{X}_m^{j,i} - \\mathbf{X}^{j,i}\\|)\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.547, + 0.483, + 0.669 + ], + "angle": 0, + "content": "Given the updated camera intrinsic \\(\\tilde{K}\\) after an iteration of optimization, the target matching 2D coordinates \\(\\mathbf{F}_m^{j,i} \\in \\mathbb{R}^{H \\times W \\times 2}\\) can be calculated as \\(\\mathbf{F}_m^{j,i} = p(\\tilde{\\mathbf{K}}\\mathbf{X}_m^{j,i})\\) where \\(p\\) is a mapping from 3D camera coordinates to 2D pixel coordinates. The optical flow loss proposed in MonST3R can thus be modified with our dynamic mask and 2D matching coordinates. 
Details about the optical flow loss are referred to MonST3R [52]." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.689, + 0.483, + 0.723 + ], + "angle": 0, + "content": "B. Fast 3D Reconstruction with video PO-MATO" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.735, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Given a sequence of images less than the temporal window length of 12 frames, dynamic 3D reconstruction can be obtained by directly estimating the pointmaps of all reference images to the coordinate of the key frame as discussed in the Sec.3.4. Here, we provide more visualization results of this feed-forward manner and demonstrate the effectiveness of introducing the temporal motion module. As shown in Fig.8, directly applying the pairwise reconstruction will suffer from an obvious scale shift among different frames. After the temporal motion module, the consistency within the video sequence obtains an obvious enhancement." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.179, + 0.724, + 0.196 + ], + "angle": 0, + "content": "C. Training Data Details" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.204, + 0.905, + 0.25 + ], + "angle": 0, + "content": "The details about the training datasets can be found in Tab.6. The finetuning procedure of POMATO was conducted exclusively using synthetic training datasets." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.263, + 0.877, + 0.281 + ], + "angle": 0, + "content": "D. More Visualizations on Dynamic Scenes" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.288, + 0.906, + 0.365 + ], + "angle": 0, + "content": "We provide more visualizations in Fig. 9 and Fig. 10. MonST3R suffers obvious degeneration when the view displacement is large as reflected by the erroneous pose estimation while POMATO can still provide a consistent camera trajectory." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.157, + 0.171, + 0.842, + 0.277 + ], + "angle": 0, + "content": "
DatasetDomainScene Type# of Frames# of ScenesDynamicsRatio
PointOdyssey [54]SyntheticIndoors & Outdoors200k131Realistic57.1%
TartanAir [45]SyntheticIndoors & Outdoors100k163None14.3%
DynamicReplica [21]SyntheticIndoors145k524Realistic14.3%
ParallelDomain4D [40]SyntheticOutdoors750k15015Driving8.6%
Carla [10]SyntheticOutdoors7k5Driving5.7%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.288, + 0.908, + 0.318 + ], + "angle": 0, + "content": "Table 6. An overview of all training datasets and sample ratio. All datasets provide both camera pose, depth, and most of them include dynamic objects." + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.486, + 0.907, + 0.788 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.802, + 0.908, + 0.83 + ], + "angle": 0, + "content": "Figure 8. Fast 3D reconstruction with our temporal motion module. Given a sequence of images less than temporal window length, our POMATO can directly obtain a global pointmap under the key frame coordinate." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.111, + 0.142, + 0.891, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.14, + 0.835, + 0.856, + 0.851 + ], + "angle": 0, + "content": "Figure 9. Compared with MonST3R, our POMATO can provide more complete dynamic masks and consistent geometry." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.135, + 0.892, + 0.814 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.831, + 0.908, + 0.862 + ], + "angle": 0, + "content": "Figure 10. MonST3R suffers obvious degeneration when the view displacement is large as reflected by the erroneous pose estimation while POMATO can still provide a consistent camera trajectory." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_origin.pdf b/data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3444cc4aa2de2fbd0b98efecb8b4ce1c48ef105c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1303f8d0c47942ef0efb7001b1259f90c18c30bc3b374614b7913fcbcd2d50df +size 12579857 diff --git a/data/2025/2504_05xxx/2504.05692/full.md b/data/2025/2504_05xxx/2504.05692/full.md new file mode 100644 index 0000000000000000000000000000000000000000..002992879787361937a12d3b41ae6806d7198c10 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/full.md @@ -0,0 +1,329 @@ +# POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction + +Songyan Zhang $^{1*}$ Yongtao Ge $^{2,3*}$ Jinyuan Tian $^{2*}$ Guangkai Xu $^{2}$ Hao Chen $^{2\boxtimes}$ Chen Lv $^{1}$ Chunhua Shen $^{2}$ + +1Nanyang Technological University, Singapore 2Zhejiang University, China 3The University of Adelaide, Australia + +![](images/2038d457d38fd0c221b12bdad5e2a377292a6a5391edc6521c9a1a933405292f.jpg) +Figure 1. 3D reconstruction from an arbitrary dynamic video with POMATO. Without relying on external modules, POMATO can directly perform 3D reconstruction along with temporal 3D point tracking and dynamic mask estimation. + +![](images/7ef319305d817284a367ded55f397489a47ecfddf523efa5658896a2d17354bf.jpg) + +![](images/17b51d4a6a4dec36c51e858d07bf3a17155c0a4b6326adbd345b3eed1d1b4ef0.jpg) + +# Abstract + +Recent approaches to 3D reconstruction in dynamic scenes primarily rely on the integration of separate geometry estimation and matching modules, where the latter plays a critical role in distinguishing dynamic regions and mitigating the interference caused by moving objects. 
Furthermore, the matching module explicitly models object motion, enabling the tracking of specific targets and advancing motion understanding in complex scenarios. Recently, the proposed representation of pointmap in DUSt3R suggests a potential solution to unify both geometry estimation and matching in 3D space, effectively reducing computational overhead by eliminating the need for redundant auxiliary modules. However, it still struggles with ambiguous correspondences in dynamic regions, which limits reconstruction + +performance in such scenarios. In this work, we present POMATO, a unified framework for dynamic 3D reconstruction by marrying POintmap MAtching with Temporal mOtion. Specifically, our method first learns an explicit matching relationship by mapping RGB pixels across different views to 3D pointmaps within a unified coordinate system. Furthermore, we introduce a temporal motion module for dynamic motions that ensures scale consistency across different frames and enhances performance in 3D reconstruction tasks requiring both precise geometry and reliable matching, most notably 3D point tracking. We show the effectiveness of our proposed POMATO by demonstrating the remarkable performance across multiple downstream tasks, including video depth estimation, 3D point tracking, and pose estimation. Code and models are publicly available at https://github.com/wyddmw/POMATO. + +![](images/a572bf8f3f31147ee0b7fe7ce8470f549bc4ee48d2b9a7606bf4cb6e04f57753.jpg) +Image1 + +![](images/865c5f7f18ca194c59889f3437641b2c340f0dae47f7ba5904d226f77a02757d.jpg) +Figure 2. Ambiguity in 3D point matching in dynamic scenes with DUSt3R. Given representative corresponding pixels of background (orange) and moving foreground (red) in two different views, DUSt3R outputs a pair of 3D points within the same coordinate system. In static regions, identical pixels share the same 3D coordinates which provide an accurate matching relationship in 3D space, but in moving regions, the 3D coordinates are inconsistent for corresponding pixels across views, leading to ambiguous 3D matching relationships. + +![](images/22f3ec6d130b32d236544f7a36f8d34c068ee43491968d8b8252afd707b98ed1.jpg) + +# 1. Introduction + +Image-based 3D reconstruction is a fundamental task in computer vision with a wide range of applications including SLAM [39], robotics [19, 49], autonomous driving [53], and novel view synthesis [5]. While substantial progress has been achieved in static 3D reconstruction [16, 23, 26, 44, 51], dynamic scenes remain a major hurdle due to complexities like non-rigid motion and deformation, which may hamper the learning of local structure and camera motion, thereby complicating accurate 3D reconstruction for dynamic scenes. These scenarios require explicit modeling of both scene geometry and object motion. Moreover, downstream reconstruction tasks, such as 3D point tracking, demand precise geometry estimation and robust matching across views. To effectively distinguish dynamic regions, it is essential to establish reliable correspondences between different frames. Some pioneering works have attempted to address dynamic motion by incorporating additional auxiliary matching modules, such as optical flow [42, 52] or 2D tracking [47]. However, these approaches may suffer from domain gaps and accumulated errors between modules, limiting their effectiveness. A unified framework that seamlessly integrates geometry estimation and matching for dynamic 3D reconstruction remains a critical and underexplored challenge. 

Recently, DUSt3R [44] proposes a promising solution to address this challenge. It introduces the concept of a pointmap that assigns each pixel in an image to a corresponding 3D coordinate. The network utilizes a standard transformer-based encoder-decoder architecture, receives a pair of images as input, and incorporates two parallel decoders to predict pointmaps for each view within the same coordinate system. However, this representation is limited to static matching and struggles in dynamic scenes, as illustrated in Fig. 2.

To address this problem, we present POMATO, a unified network for dynamic 3D reconstruction by marrying POintmap MAtching with Temporal mOtion. We argue that with iterative cross-attention modules across different views, matching features are well preserved in the decoder tokens. We thus introduce an auxiliary pointmap matching head to learn explicit correspondences. Specifically, for each pixel in the second view, the pointmap matching head predicts the corresponding 3D coordinates of its counterpart in the first view, under the shared coordinate system. Our proposed pointmap-based matching representation enables the establishment of explicit correspondences in 3D space, which can be directly leveraged for motion analysis, especially the estimation of dynamic regions. Moreover, we further extend our POMATO to handle 4D video sequences by introducing a temporal motion module that enhances the learning of temporal motions. This motion module promotes scale consistency across different frames and improves performance in tasks where both accurate geometry and reliable matching are paramount, most notably 3D point tracking. Compared with recent temporal 3D reconstruction methods [41, 43] that operate in an autoregressive manner, where previous frames are blocked from the recently added frames, our temporal motion module is based on the self-attention mechanism along the temporal dimension, facilitating comprehensive interaction across all frames. Our POMATO is trained in a two-stage manner. In the first stage, we use pairwise input images to learn fundamental geometry and matching capabilities. In the second stage, we extend the input to sequential video frames and incorporate the temporal motion module, enabling the model to effectively capture motions over time.

Our contributions are threefold: First, we propose a novel approach that unifies fundamental geometry estimation and motion understanding for dynamic 3D reconstruction in a single network by incorporating the representation of pointmap matching. Second, we introduce a temporal motion module to facilitate the interaction of motion features along the temporal dimension, which significantly improves the performance in tasks where both accurate geometry and precise matching are required for sequential video input, most notably 3D point tracking. Third, we demonstrate promising performance on 3D vision tasks, including video depth estimation, 3D point tracking, and camera pose estimation.

# 2. Related Work

Geometry estimation refers to the process of determining spatial properties and structures from different forms of visual data. Direct recovery of 3D geometry from a single RGB image is by nature an ill-posed problem. Many recent works [3, 16, 23, 51] have tried to leverage strong pre-trained models to learn generalizable depthmaps from large-scale real and synthetic datasets to resolve ambiguities. 
For example, Marigold [23], Geowizard [11], and GenPercept [48] aim at leveraging the generative priors of pre-trained diffusion models by finetuning them on synthetic datasets. Depth Anything V2 [51] proposes to estimate a scale-and-shift-invariant disparity map by finetuning the DINOv2 [29] model on synthetic datasets and large-scale pseudo labels. Depth Pro [3] further proposes an FOV head to estimate the metric depthmap from a single image without relying on camera intrinsics as input. Due to the scale ambiguity of monocular depth estimation models, ChronoDepth [36], DepthCrafter [17], and Depth-any-video [50] propose to learn temporally consistent depthmaps by leveraging the priors of a video generative model, i.e., SVD [2]. In another line of research, multi-view stereo (MVS) methods seek to reconstruct visible surfaces from multiple viewpoints. Traditional MVS [12] and SfM pipelines break reconstruction into several sub-problems, e.g., feature extraction [8], image matching [1, 27], triangulation, and bundle adjustment [7]. The chain is complicated and accumulates noise at every single step, thus often resulting in unsatisfactory performance in complex real-world scenes. Recognizing the limitations of previous MVS methods, the seminal work DUSt3R [44] proposes a 3D pointmap representation and trains a network on large-scale data to regress dense and accurate pointmaps from a pair of images. The camera intrinsics and relative camera poses can be implicitly inferred from the two-view pointmaps. However, it still cannot handle reconstruction for dynamic scenes. MonST3R [52] directly finetuned the original DUSt3R model on synthetic datasets that contain dynamic scenes.

Motion representation. Optical flow is a commonly used representation for 2D motion. RAFT [38] is a representative work for pairwise optical flow estimation, which employs a 4D cost volume and recurrently estimates the optical flow. Some follow-up methods further extend it to multi-frame (3-5 frames) settings, which are still insufficient for long-range tracking. To resolve this problem, Particle Video [35] represents video motion with a set of particles. Each particle is an image point sample with a long-duration trajectory and other properties. Particle videos have two key advantages over optical flow: (1) persistence through occlusions, and (2) multi-frame temporal context. Some recent works, PIPs [15], TAPIR [9], and CoTracker [22], have renewed interest in this representation and show promising long-term 2D point tracking results. Recognizing the advantage of the point representation, SpatialTracker [47] lifts 2D points into 3D and performs tracking in 3D space. Though it can handle occlusions and enhance 3D tracking accuracy, it still relies on a separate monocular depth estimator, which prevents it from performing 3D point tracking in an end-to-end fashion.

Multi-view dynamic reconstruction. Our work is closely connected to multi-view dynamic 3D reconstruction techniques. Early works [32, 34] take the straightforward approach of first pre-segmenting the scene into different regions, each corresponding to a single rigid part of an object, and then applying rigid SfM to each of the regions. Some recent Neural Radiance Fields (NeRF) [28] and Gaussian Splatting [24] based methods have achieved state-of-the-art results. However, most of these methods require simultaneous multi-view video inputs or predefined templates [18]. 
Shape of Motion [42] proposes a new representation that models the dynamic scene as a set of persistent 3D Gaussians, optimizing the representation from a monocular video by leveraging monocular depth estimation priors and 2D track estimates across frames.

# 3. Method

# 3.1. Preliminary

The overview of our POMATO is illustrated in Fig. 3. We adopt the definition of pointmap $\mathbf{X} \in \mathbb{R}^{H \times W \times 3}$ in DUSt3R [44] as a dense 2D field of 3D points where each point corresponds to its respective RGB pixel. Given a pair of input images $\mathbf{I}^1, \mathbf{I}^2 \in \mathbb{R}^{H \times W \times 3}$ from two different views, a weight-sharing ViT first extracts the corresponding features $\mathbf{F}^1, \mathbf{F}^2$ for each view. Two parallel branches are employed to decode the geometric structures and enhance the feature alignment via cross-attention in the decoder modules, followed by a regression head that estimates pointmaps $\mathbf{X}^{1,1}, \mathbf{X}^{2,1} \in \mathbb{R}^{H \times W \times 3}$ along with a confidence map $\mathbf{C}^{1,1}, \mathbf{C}^{2,1} \in \mathbb{R}^{H \times W}$ for each image view. Generally, $\mathbf{X}^{n,m}$ indicates the pointmap $\mathbf{X}^n$ from camera $n$ expressed in camera $m$ 's coordinate frame, which is obtained by a rigid transformation:

$$
\mathbf {X} ^ {n, m} = \mathbf {P} _ {m} \mathbf {P} _ {n} ^ {- 1} h \left(\mathbf {X} ^ {n}\right), \tag {1}
$$

where $\mathbf{P}_m, \mathbf{P}_n \in \mathbb{R}^{3 \times 4}$ are the world-to-camera poses of camera $m$ and camera $n$, respectively, and $h(\mathbf{X}^n)$ is the homogeneous mapping for the 3D coordinates in the camera coordinate system of camera $n$.

![](images/6a30322c438fc8178fdadc08700fa2c23601ca083bfd8c9601b0c19f2ade291e.jpg)
Figure 3. Overview of our training pipeline. (1) Stage I: built upon the DUSt3R [44] architecture, we introduce a third regression point-matching head, $\mathrm{Head}_3$ , in parallel to $\mathrm{Head}_2$ for explicit pointmap matching in 3D space. For each pixel in the second view, the output pointmap coordinate is the 3D coordinate of the corresponding pixel in the first view. (2) Stage II: we introduce a temporal fusion module in the three heads that enables multi-style sequential input for learning temporal motions.

![](images/e6d6b6d0d562795bdee50bef700038574cb99c8c3950f3ff6313b68af3401c0f.jpg)

Decoder 1 and its regression head estimate the 3D points of $\mathbf{I}^1$ in its own coordinate system, while Decoder 2 and its regression head are responsible for estimating pixel-wise 3D coordinates of $\mathbf{I}^2$ in $\mathbf{I}^1$ 's coordinate system after a rigid transformation of global rotation and translation. In the following, we first introduce our POMATO with pairwise input images and then extend it to video sequence input with our temporal motion module.

# 3.2. Pointmap Matching with Pairwise Input

As discussed before, the definition of $\mathbf{X}^{2,1}$ depicts a rigid camera transformation that cannot reflect explicit matching relationships for dynamic regions. To tackle this, we propose to formulate an explicit pointmap matching $\mathbf{X}_m^{2,1} \in \mathbb{R}^{H \times W \times 3}$ that maps dense RGB pixels of $\mathbf{I}^2$ to the 3D coordinates of corresponding pixels in $\mathbf{I}^1$ under the first image's coordinate system. 
Given a 2D query pixel at $(x_2, y_2)$ in $\mathbf{I}^2$ and its corresponding pixel at $(x_1, y_1)$ in $\mathbf{I}^1$ , the matched pointmap at $(x_2, y_2)$ in $\mathbf{I}^2$ is:

$$
\mathbf {X} _ {m} ^ {2, 1} \left(x _ {2}, y _ {2}\right) = \mathbf {X} ^ {1, 1} \left(x _ {1}, y _ {1}\right), \tag {2}
$$

where $(x,y)$ indicates the coordinates of the 2D grid. For the representative dynamic point (red) in Fig. 2, the pointmap matching result is the 3D coordinate of point A in the coordinate system of the first image. As shown in Fig. 3, $\mathbf{X}_m^{2,1}$ and $\mathbf{X}^{1,1}$ are supposed to match perfectly in 3D space, provided that occluded regions are neglected. We argue that the set of decoder tokens from the second branch preserves abundant matching information through iterative cross-attention, so we introduce a matching head with the same architecture as $\mathrm{Head}_1$ and $\mathrm{Head}_2$ . The supervision for the pointmap matching $\mathbf{X}_m^{2,1}$ still follows the 3D regression loss, which is defined as the Euclidean distance:

$$
\mathcal {L} _ {\mathrm {m}} = \left\| \frac {1}{z _ {m}} \mathbf {X} _ {m} ^ {2, 1} - \frac {1}{\bar {z} _ {m}} \bar {\mathbf {X}} _ {m} ^ {2, 1} \right\|, \tag {3}
$$

where $\bar{\mathbf{X}}_m^{2,1}$ is the ground-truth pointmap matching, which can be obtained following Eq. 2 on 2D tracking datasets with depth and camera information, and $z_{m}, \bar{z}_{m}$ are the same normalization factors as defined in DUSt3R. The matching confidence $\mathbf{C}_m^{2,1}$ is also learned following the confidence loss for $\mathrm{Head}_1$ and $\mathrm{Head}_2$ within valid regions:

$$
\mathcal {L} _ {\mathrm {m c o n f}} = \mathbf {C} _ {m} ^ {2, 1} \mathcal {L} _ {\mathrm {m}} - \alpha \log \mathbf {C} _ {m} ^ {2, 1} . \tag {4}
$$

The final loss $\mathcal{L}$ of our POMATO for pairwise input is a combination of the predefined DUSt3R loss $\mathcal{L}_{\mathrm{DUSt3R}}$ , the matching loss $\mathcal{L}_{\mathrm{m}}$ , and the matching confidence loss $\mathcal{L}_{\mathrm{mconf}}$ . When training our POMATO on pairwise input images in the first stage, the parameters of the encoder are frozen.

# 3.3. Dynamic Mask Estimation

Taking advantage of the explicit pointmap matching head, our POMATO can directly perform dynamic mask estimation without introducing an assistant module such as an optical flow model, avoiding the additional computation cost and the potential domain gap. For an image pair $\{\mathbf{I}^i,\mathbf{I}^j\}$ , along with the estimation of $\mathbf{X}^{j,i}$ from $\mathrm{Head}_2$ and $\mathbf{X}_{m}^{j,i}$ from $\mathrm{Head}_3$ , the dynamic mask $\mathbf{D}^{j,i}$ can be obtained by comparing the difference between $\mathbf{X}^{j,i}$ and $\mathbf{X}_{m}^{j,i}$ :

$$
\mathbf {D} ^ {j, i} = \left| \left| \mathbf {X} _ {m} ^ {j, i} - \mathbf {X} ^ {j, i} \right| \right| > \alpha , \tag {5}
$$

where $\alpha$ is a dynamic threshold defined as $3 \times \mathrm{median}\left(\|\mathbf{X}_m^{j,i} - \mathbf{X}^{j,i}\|\right)$ . The explicit dynamic mask can be incorporated into the global alignment process to minimize the interference of moving objects in pose estimation and 3D reconstruction. Details on the incorporation of dynamic masks into global alignment are provided in the supplementary materials.

# 3.4. Temporal Motion Module

With the fundamental capability of geometry estimation and pointmap matching for pairwise images in place, we follow [6] and extend our POMATO to 4D video sequences by inserting a transformer-based motion module into the vanilla DPT head to construct the "temporal DPT head", as illustrated in Fig. 4. For a set of decoder tokens $\mathbf{G} \in \mathbb{R}^{B,T,N,C}$

![](images/e2fe70253e4295dea812bf5a75af67c019b523b5a34fe2c4ad961025ba265b7c.jpg)
Figure 4. Architecture of our temporal motion module. We insert a transformer-based motion module (in light yellow) into the vanilla DPT [33] head to enhance the temporal consistency.

where $B, T, N, C$ represent the batch size, the window length of a video sequence, the token number, and the token dimension, respectively, we merge the token-number dimension into the batch axis and apply the motion module, which consists of two blocks of standard multi-head self-attention modules and feed-forward networks, along the temporal dimension $T$ . To reduce the computation cost, the temporal motion modules are applied to features of low resolution.

# 3.5. Downstream Temporal Tasks

Given a video sequence of $T$ frames $\mathbf{I}^{t_1},\mathbf{I}^{t_2},\ldots ,\mathbf{I}^{t_T}$ , we construct a unique set of stereo image pairs for each task. As illustrated in Fig. 5, the flexible construction of input pairs, combined with the proposed temporal motion module and pointmap matching head, enables POMATO to seamlessly address downstream temporal tasks, including 3D point tracking, video depth estimation, and 3D reconstruction. The keyframe selection strategy and input formulation for each task are detailed below. In addition to the default regression losses for $\mathrm{Head}_1$ and $\mathrm{Head}_2$ , and the predefined losses in Eq. 3 and Eq. 4 for $\mathrm{Head}_3$ , we further introduce a temporal consistency loss, $\mathcal{L}_{\mathrm{t}}$ , which is described per task below.

3D Point Tracking. As illustrated at the top of Fig. 5, the keyframe is set to the first image of the global video sequence and fed to the proposed $\mathrm{Head}_3$ to obtain the pointmap matching result of each query point (initialized at the first image) under the coordinate system of each reference frame $\{\mathbf{X}_{m}^{t_{1},t_{1}},\mathbf{X}_{m}^{t_{1},t_{2}},\mathbf{X}_{m}^{t_{1},t_{3}},\dots \mathbf{X}_{m}^{t_{1},t_{T}}\}$ , while the set of reference frames $\{\mathbf{I}^{t_1},\mathbf{I}^{t_2},\mathbf{I}^{t_3},\dots \mathbf{I}^{t_T}\}$ is fed to $\mathrm{Head}_1$ to obtain the pointmap under each ego coordinate system. The dense tracking results can be further sparsified by indexing the 2D coordinates. When running inference on a video

![](images/8239ab20be4562800b2c68569d3d5ab2c42bca1f03dcdbe48b9eabdda64d41a0.jpg)
Figure 5. Inference pipelines for point tracking, video depth, and multi-view reconstruction. $t_k$ indicates the keyframe. With the help of the motion module and flexible input construction, POMATO can be easily applied to downstream temporal tasks.

longer than $T$ frames, a simple sliding-window approach with an overlap of four frames is adopted to enhance the consistency between adjacent video windows. 
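To make the per-task input construction and the sliding-window inference just described concrete, the following is a minimal Python sketch. It is our own illustration under stated assumptions rather than the released POMATO code: `build_pairs`, `sliding_windows`, and the `frames` list are hypothetical names, and the (keyframe, reference) pair ordering is an assumption; only the window length of 12 and the four-frame overlap come from the text above.

```python
# Illustrative sketch of the per-task pair construction (Sec. 3.5) and the
# sliding-window inference. All names are hypothetical, not the released API;
# only T = 12 and the 4-frame overlap are taken from the paper.

T = 12       # temporal window length used in the paper
OVERLAP = 4  # overlap between adjacent windows at inference time


def build_pairs(frames, task):
    """Construct (keyframe, reference) image pairs for one temporal window."""
    if task == "tracking":
        # keyframe = first frame; every frame is matched back to the query
        # points initialized on that first image (assumed pair ordering)
        return [(frames[0], f) for f in frames]
    if task == "video_depth":
        # identical pairs (I_t, I_t); Head_1's pointmap gives the depth
        return [(f, f) for f in frames]
    if task == "reconstruction":
        # keyframe = last frame; all pointmaps share its coordinate system
        return [(frames[-1], f) for f in frames]
    raise ValueError(f"unknown task: {task}")


def sliding_windows(frames):
    """Yield T-frame windows with a 4-frame overlap for longer videos."""
    step = T - OVERLAP
    start = 0
    while True:
        yield frames[start:start + T]
        if start + T >= len(frames):
            break
        start += step
```

Under these assumptions, a 20-frame clip is processed as two windows covering frames 1-12 and 9-20, and the four shared frames are what keep predictions consistent across adjacent windows.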
The temporal consistency loss $\mathcal{L}_{\mathrm{t}}$ for tracking is:

$$
\mathcal {L} _ {\mathrm {t}} = \frac {1}{T} \sum_ {i = 1} ^ {T} \left\| \frac {\mathbf {X} _ {m} ^ {t _ {1} , t _ {i}}}{z _ {m} ^ {T}} - \frac {\bar {\mathbf {X}} _ {m} ^ {t _ {1} , t _ {i}}}{\bar {z} _ {m} ^ {T}} \right\| + \left\| \frac {\mathbf {X} ^ {t _ {i} , t _ {i}}}{z ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\bar {z} ^ {T}} \right\|, \tag {6}
$$

where $z_{m}^{T} = \mathrm{norm}\left(\mathbf{X}_{m}^{t_{1},t_{1}},\mathbf{X}_{m}^{t_{1},t_{2}},\dots,\mathbf{X}_{m}^{t_{1},t_{T}}\right)$ and $\bar{z}_{m}^{T} = \mathrm{norm}\left(\bar{\mathbf{X}}_{m}^{t_{1},t_{1}},\bar{\mathbf{X}}_{m}^{t_{1},t_{2}},\dots,\bar{\mathbf{X}}_{m}^{t_{1},t_{T}}\right)$ ; the normalization factors $z^{T}$ and $\bar{z}^{T}$ of the second term are defined analogously over $\mathbf{X}^{t_i,t_i}$ and $\bar{\mathbf{X}}^{t_i,t_i}$ .

Video Depth Estimation. As shown in the middle part of Fig. 5, the input video sequence is formulated as a set of identical image pairs $\{(\mathbf{I}^{t_1},\mathbf{I}^{t_1}),(\mathbf{I}^{t_2},\mathbf{I}^{t_2}),\dots,(\mathbf{I}^{t_T},\mathbf{I}^{t_T})\}$ and fed to $\mathrm{Head}_1$ and $\mathrm{Head}_2$ , where the predictions from each head are identical: $\{\mathbf{X}^{t_1,t_1},\mathbf{X}^{t_2,t_2},\dots,\mathbf{X}^{t_T,t_T}\}$ . We use the output of $\mathrm{Head}_1$ as our final video depth estimation. The temporal consistency loss $\mathcal{L}_{\mathrm{t}}$ is defined as:

$$
\mathcal {L} _ {\mathrm {t}} = \frac {1}{T} \sum_ {i = 1} ^ {T} \left\| \frac {\mathbf {X} _ {1} ^ {t _ {i} , t _ {i}}}{z _ {1} ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\bar {z} ^ {T}} \right\| + \left\| \frac {\mathbf {X} _ {2} ^ {t _ {i} , t _ {i}}}{z _ {2} ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\bar {z} ^ {T}} \right\|, \tag {7}
$$

where $\mathbf{X}_1^{t_i,t_i}$ and $\mathbf{X}_2^{t_i,t_i}$ indicate the outputs from $\mathrm{Head}_1$ and $\mathrm{Head}_2$ , respectively, and $\bar{\mathbf{X}}^{t_i,t_i}$ is the ground-truth pointmap.

3D Reconstruction. Assisted by the temporal motion module, redundant post-processing operations such as global alignment can be omitted, allowing the reconstructed 3D point cloud to be obtained in a feed-forward manner. As shown in the bottom part of Fig. 5, the keyframe is set to the last frame

<table>
<tr><td rowspan="2">Alignment</td><td rowspan="2">Method</td><td rowspan="2">Optim.</td><td rowspan="2">Onl.</td><td colspan="2">Sintel [4]</td><td colspan="2">BONN [30]</td><td colspan="2">KITTI [13]</td></tr>
<tr><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td></tr>
<tr><td rowspan="6">Per-sequence scale</td><td>DUSt3R-GA [44]</td><td>✓</td><td></td><td>0.656</td><td>45.2</td><td>0.155</td><td>83.3</td><td>0.144</td><td>81.3</td></tr>
<tr><td>MASt3R-GA [26]</td><td>✓</td><td></td><td>0.641</td><td>43.9</td><td>0.252</td><td>70.1</td><td>0.183</td><td>74.5</td></tr>
<tr><td>MonST3R-GA [52]</td><td>✓</td><td></td><td>0.378</td><td>55.8</td><td>0.067</td><td>96.3</td><td>0.168</td><td>74.4</td></tr>
<tr><td>Spann3R [41]</td><td></td><td>✓</td><td>0.622</td><td>42.6</td><td>0.144</td><td>81.3</td><td>0.198</td><td>73.7</td></tr>
<tr><td>CUT3R [43]</td><td></td><td>✓</td><td>0.421</td><td>47.9</td><td>0.078</td><td>93.7</td><td>0.118</td><td>88.1</td></tr>
<tr><td>POMATO</td><td></td><td>✓</td><td>0.416</td><td>53.6</td><td>0.074</td><td>96.1</td><td>0.085</td><td>93.3</td></tr>
<tr><td rowspan="3">Per-sequence scale & shift</td><td>MonST3R-GA [52]</td><td>✓</td><td></td><td>0.335</td><td>58.5</td><td>0.063</td><td>96.4</td><td>0.104</td><td>89.5</td></tr>
<tr><td>CUT3R [43]</td><td></td><td>✓</td><td>0.466</td><td>56.2</td><td>0.111</td><td>88.3</td><td>0.075</td><td>94.3</td></tr>
<tr><td>POMATO</td><td></td><td>✓</td><td>0.345</td><td>57.9</td><td>0.072</td><td>96.5</td><td>0.084</td><td>93.4</td></tr>
+ +Table 1. Video depth evaluation. We report scale-invariant depth and scale & shift invariant depth accuracy on Sintel [4], Bonn [30], and KITTI [13] datasets. Methods requiring global alignment are marked “GA”, while “Optim.” and “Onl.” indicate optimization-based and online methods, respectively. The best and second best results in each category are bold and underlined, respectively. + +$\mathbf{I}^{t_T}$ within the temporal window of length $T$ and is fed to $\mathrm{Head}_1$ with a set output of $\{\mathbf{X}^{t_T,t_T},\mathbf{X}^{t_T,t_T},\dots,\mathbf{X}^{t_T,t_T}\}$ . All the reference frames are input to the $\mathrm{Head}_2$ so the target pointmaps $\{\mathbf{X}^{t_1,t_T},\mathbf{X}^{t_2,t_T},\dots,\mathbf{X}^{t_T,t_T}\}$ are aligned under the coordinate system of the keyframe. The temporal consistency loss $\mathcal{L}_{\mathrm{t}}$ is: + +$$ +\mathcal {L} _ {\mathrm {t}} = \frac {1}{T} \sum_ {i = 1} ^ {T} \left\| \frac {\mathbf {X} ^ {t _ {T} , t _ {T}}}{z _ {1} ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {T} , t _ {T}}}{\bar {z} _ {1} ^ {T}} \right\| + \left\| \frac {\mathbf {X} ^ {t _ {i} , t _ {T}}}{z _ {2} ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {T}}}{\bar {z} _ {2} ^ {T}} \right\| \tag {8} +$$ + +We further freeze the parameters in Decoder1 and Decoder2 when training the temporal downstream tasks at the second stage. In our work, the temporal window length $T$ is set to 12. Additional explorations on the temporal length can be found in Sec.4. + +# 4. Experiments + +# 4.1. Experimental Details + +Training data. We train our network with a mixture of five datasets: PointOdyssey [54], Tartanair [45], ParallelDomain4D [40], DynamicReplica [21] and Carla (0.9.15) [10]. The specific number and the usage ratio of each dataset can be found in the supplementary materials. All datasets include pixel-accurate ground truth depth, as well as camera intrinsics and extrinsics, and encompass a wide variety of dynamic scenes across both indoor and outdoor environments. Among them, PointOdyssey and DynamicReplica have additional 2D trajectory annotations for dynamic objects which can be used to construct pointmap matching ground truth following Eq. 2. All datasets are used to supervise geometry learning on $\mathrm{Head}_1$ and $\mathrm{Head}_2$ , while only PointOdyssey, DynamicReplica, and TartanAir are used to train the proposed pointmap matching head. + +Training and inference details. Our model architecture is based on the publicly available DUSt3R [52] model, utilizing the same backbone consisting of a ViT-Large encoder + +and a ViT-Base decoder. To fully leverage MonST3R's geometry estimation capabilities in dynamic scenes, we initialize our model using the publicly available MonST3R checkpoint. For the newly introduced pointmap matching head, we initialize its weights from the pretrained $\mathrm{Head}_2$ weights of MonST3R. The temporal motion module is initialized following [14]. We train our network for 10 epochs with a cosine learning rate schedule, with an initial learning rate of 1e-4. In the first stage, which involves pairwise training, we use a batch size of 16 on 4 A100 GPUs (40G). In the second stage, where the temporal motion module is introduced, the batch size is set to 4 with a fixed temporal window length of 12. During each training iteration, we randomly sample a downstream task—3D point tracking, video depth estimation, or 3D reconstruction—to construct the input pairs and apply the corresponding loss function. + +# 4.2. 
Video Depth Estimation

Following MonST3R [52] and CUT3R [43], we rescale all predictions from the same video to align them together under two forms of alignment: per-sequence scale alignment, and per-sequence scale and shift alignment. This allows us to measure both per-frame depth quality and inter-frame depth consistency. We employ our proposed motion module for video depth estimation in a feed-forward manner as described in Sec. 3.5 and compare our method against DUSt3R [44] and several related methods, including MASt3R [26], MonST3R [52], Spann3R [41], and CUT3R [43]. Given 6 frames of $288 \times 512$ on an NVIDIA 4070 GPU, POMATO reconstructs the 3D point cloud in 0.7 seconds, whereas global alignment-based methods such as MonST3R require 5.8 seconds. As shown in Tab. 1, our method demonstrates comparable performance to the global alignment (GA)-based MonST3R [52] on the Sintel [4] and BONN [30] datasets, while surpassing it on the KITTI dataset. Besides, we

<table>
<tr><td rowspan="2">Method</td><td colspan="2">PointOdyssey [54]</td><td colspan="2">ADT [31]</td><td colspan="2">PStudio [20]</td><td colspan="2">Average</td></tr>
<tr><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td></tr><tr><td>
SpatialTracker* [47]20.4620.7121.6420.6730.4125.8724.1722.42
DUSt3R [44]19.0319.0329.0225.559.726.5019.2617.03
MASt3R [26]16.5817.3527.3626.4611.788.0918.5717.30
MonST3R [52]27.3127.9228.3026.1316.5011.0624.0321.70
POMATO33.2033.5831.5728.2224.5919.7929.7927.20
+ +Table 2. 3D tracking evaluation. We report the APD metric to evaluate 3D point tracking on the PointOdyssey [54], ADT [31], and PStudio [20] datasets. L-12 and L-24 indicate tracking within the temporal length of 12 frames and 24 frames, respectively. + +![](images/ffa2a8e80de9dcbc736d37d3eb5d1989e36111cb092a54c8fa4c690e19d35a0a.jpg) +Figure 6. Qualitative comparison of dynamic scenes. Compared to MonST3R, our POMATO can provide more reliable motion masks, 3D point tracking, and reconstruction performance. + +consistently outperform the state-of-the-art online method, CUT3R [43], across various settings. These results underscore the effectiveness of our approach, specifically (1) the joint learning of geometry and pointmap matching, and (2) the temporal motion module. + +# 4.3. 3D Point Tracking + +For 3D point tracking task, we use the Aria Digital Twin (ADT) [31], and Panoptic Studio (PStudio) [20] benchmarks from the TAPVid-3D [25] dataset along with the validation set on the PointOdyssey [54] dataset. We report the Average Percent Deviation (APD) metric, which quantifies the average percentage of points within a threshold relative to the ground truth depth. The APD metric serves as a direct measure of the accuracy of the predicted tracking. We reformulate the datasets and project all the query points within a temporal window to the first frame. We report tracking results on the length of 12 and 24 frames. As shown in Tab.2, our POMATO achieves the best performance on both PointOdyssey and ADT datasets. It's worth mentioning that SpatialTracker [47] is a state-of-the-art network tailored for 3D point tracking with ground truth camera intrinsic as ad + +ditional input data. POMATO surpasses it on two datasets and improves the average APD metric by $23.3\%$ and $21.4\%$ for 12 frames and 24 frames, respectively. For DUSt3R-based methods, we use the output of $\mathrm{Head}_2$ as tracking results. Obviously, the ambiguous matching representation limits its capability to handle this fine-grained 3D reconstruction task in dynamic scenes. + +# 4.4. Camera Pose Estimation + +Following DUSt3R-based methods, we perform global alignment with the model trained in the first stage on the Bonn [30] and TUM [37] datasets. The sampling stride is set to 5 for the Bonn dataset and 3 for the TUM dataset. Compared with optical-flow assisted global alignment in MonST3R, the dynamic mask is computed according to Eq. 5 while the 2D pseudo label is replaced by projecting the pointmap matching results to 2D coordinates with estimated camera intrinsic. Absolute Translation Error (ATE), Relative Translation Error (RPE trans), and Relative Rotation Error (RPE rot) are reported. The evaluation results over 40 frames are reported in Tab. 4. Notably, POMATO obtains an overall state-of-the-art performance and signifi + +
Temporal Length</td><td colspan="6">Video Depth</td><td colspan="3">Tracking (12 Frames)</td></tr>
<tr><td></td><td colspan="2">Sintel [4]</td><td colspan="2">Bonn [30]</td><td colspan="2">KITTI [13]</td><td>PointOdyssey [54]</td><td>ADT [31]</td><td>PStudio [20]</td></tr>
<tr><td></td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>APD ↑</td><td>APD ↑</td><td>APD ↑</td></tr><tr><td>
Pair-wise0.54846.20.08794.00.11389.532.0629.8723.10
6 frames0.43651.30.07695.90.08593.532.6930.9324.52
12 frames0.41653.60.07596.10.08693.333.2031.5724.59
+ +Table 3. Ablation study on the temporal motion module. The introduction of the temporal motion module brings a significant improvement. As the temporal window length enlarges from 6 frames to 12 frames, we obtain an overall consistent improvement. + +
Method</td><td colspan="3">TUM [37]</td><td colspan="3">Bonn [30]</td></tr>
<tr><td></td><td>
ATE ↓RPE trans ↓RPE rot ↓ATE ↓RPE trans ↓RPE rot ↓
DUSt3R [44]0.0250.0132.3610.0300.0252.522
MASt3R [26]0.0270.0151.9100.0310.0252.478
MonST3R [52]0.0210.0061.1420.0250.0212.120
CUT3R [43]0.0230.0160.5100.0280.0332.569
POMATO0.0200.0100.5090.0370.0161.782
+ +![](images/48fc00ed0ccff1357aa20e08011e8b828d049473515dabed56bb834c4997282a.jpg) + +![](images/98a876741c25930c1c2f4bfce1d1c178379109f82b67d15153cad3df741cec13.jpg) + +![](images/c01e277c9d6d6d62313b376a0e50c866e3c5aa3c837fecebdb575154bd8f9543.jpg) +Input Images +Figure 7. Effectiveness of our pointmap matching head. Without explicitly filtering out the motion area, both pose and geometry estimation will be degraded. + +![](images/abf58e9ec06158973e58dbe4a60955efcc728a923015cf9b8a45e5acad73ffcb.jpg) + +![](images/a00bf8bcacaf47797f494012a320e2a40fdd47220aa6b3eb0c87cdcafa2a834a.jpg) +3D Reconstruction with our Pointmap Matching. + +Table 4. Pose estimation. Our method achieves an overall best performance and improves the RPE rot metric significantly. + +
Method</td><td colspan="3">Bonn [30]</td><td>PointOdyssey [54]</td><td>ADT [31]</td><td>PStudio [20]</td></tr>
<tr><td></td><td>
ATE ↓RPE trans ↓RPE rot ↓APD ↑APD ↑APD ↑
W/O Head30.0400.0151.72129.1029.6216.94
W/ Head30.0370.0161.78232.0629.8723.10
+ +Table 5. Ablation study on the effectiveness of the pointmap matching head. The comparisons are reported on the pose estimation and 3D point tracking tasks. + +cantly improves the RPE-rot metric, surpassing MonST3R by $55.4\%$ and $13.3\%$ on the TUM and Bonn datasets. + +# 4.5. Ablation Study + +We conduct extensive ablation studies to evaluate the effectiveness of the temporal motion module and the proposed pointmap matching head. As shown in Table 3, we report results for three models: one trained with only pairwise + +images (first-stage training), one using a shorter temporal window of 6 frames, and another using the default temporal window length of 12 frames. Incorporating temporal consistency yields substantial improvements across all datasets for video depth estimation and 3D point tracking. Further improvement is achieved when the temporal window length increases from 6 frames to 12 frames. In Table 5, we evaluate the effectiveness of the pointmap matching head. While it introduces only a modest improvement in the ATE metric, we attribute this to the limited motion and minimal viewpoint variation in the indoor evaluation dataset. As illustrated in Fig. 7, under challenging in-the-wild conditions with significant motion and rapid viewpoint changes, removing the pointmap matching head introduces ambiguity in explicit rigid transformation estimation, resulting in a clear degradation in performance. To further demonstrate the impact of the pointmap matching head on 3D point tracking, we conduct tracking experiments over 12 frames using the pairwise input setup. Clearly, removing the pointmap matching head (using only $\mathrm{Head}_2$ ) leads to an inevitable performance drop, emphasizing explicit correspondence modeling for reliable long-term tracking. + +# 5. Discussion and Conclusion + +We introduce POMATO, a unified framework for geometry estimation and motion understanding in dynamic scenes. By leveraging the proposed pointmap matching head, our method effectively distinguishes moving regions, thereby mitigating the interference introduced by dynamic objects. The temporal motion module further facilitates the learning of temporal dynamics across frames, enhancing scale consistency and improving performance in tasks where both geometry and matching are critical—most notably, 3D point tracking. The downstream temporal tasks including 3D point tracking, video depth estimation, and 3D reconstruction can be easily applied in a feed-forward manner. In future work, we plan to scale up training with more dynamic reconstruction and matching datasets to further enhance 3D reconstruction and tracking performance. + +Acknowledgement. This work was supported by the National Natural Science Foundation of China (No. 62206244) + +# References + +[1] Daniel Barath, Dmytro Mishkin, Luca Cavalli, Paul-Edouard Sarlin, Petr Hruby, and Marc Pollefeys. Affineglue: Joint matching and robust estimation. arXiv preprint arXiv:2307.15381, 2023. 3 +[2] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, Varun Jampani, and Robin Rombach. Stable video diffusion: Scaling latent video diffusion models to large datasets. abs/2311.15127, 2023. 3 +[3] Aleksei Bochkovskii, Amael Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R. Richter, and Vladlen Koltun. Depth pro: Sharp monocular metric depth in less than a second. arXiv, 2024. 3 +[4] Daniel J. Butler, Jonas Wulff, Garrett B. Stanley, and Michael J. 
Black. A naturalistic open source movie for optical flow evaluation. In ECCV, pages 611-625, 2012. 6, 8 +[5] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. CVPR, 2023. 2 +[6] Sili Chen, Hengkai Guo, Shengnan Zhu, Feihu Zhang, Zi long Huang, Jiashi Feng, and Bingyi Kang. Video depth anything: Consistent depth estimation for super-long videos. arXiv preprint arXiv:2501.12375, 2025. 4 +[7] Yu Chen, Yisong Chen, and Guoping Wang. Bundle adjustment revisited. arXiv preprint arXiv: 1912.03858, 2019. 3 +[8] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 224-236, 2018. 3 +[9] Carl Doersch, Yi Yang, Mel Vecerik, Dilara Gokay, Ankush Gupta, Yusuf Aytar, Joao Carreira, and Andrew Zisserman. Tapir: Tracking any point with per-frame initialization and temporal refinement. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10061-10072, 2023. 3 +[10] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. Carla: An open urban driving simulator. In Conference on robot learning, pages 1-16. PMLR, 2017. 6, 2 +[11] Xiao Fu, Wei Yin, Mu Hu, Kaixuan Wang, Yuexin Ma, Ping Tan, Shaojie Shen, Dahua Lin, and Xiaoxiao Long. Geowizard: Unleashing the diffusion priors for 3d geometry estimation from a single image. arXiv preprint arXiv: 2403.12013, 2024. 3 +[12] Yasutaka Furukawa, Carlos Hernández, et al. Multi-view stereo: A tutorial. Foundations and Trends® in Computer Graphics and Vision, 9(1-2):1-148, 2015. 3 +[13] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In CVPR, pages 3354-3361, 2012. 6, 8 +[14] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff:Animate your personalized text-to-image diffusion models without specific tuning, 2023.6 + +[15] Adam W. Harley, Zhaoyuan Fang, and Katerina Fragkiadaki. Particle video revisited: Tracking through occlusions using point trajectories. In ECCV, pages 59-75. Springer, 2022. 3 +[16] Mu Hu, Wei Yin, Chi Zhang, Zhipeng Cai, Xiaoxiao Long, Hao Chen, Kaixuan Wang, Gang Yu, Chunhua Shen, and Shaojie Shen. Metric3d v2: A versatile monocular geometric foundation model for zero-shot metric depth and surface normal estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2, 3 +[17] Wenbo Hu, Xiangjun Gao, Xiaoyu Li, Sijie Zhao, Xiaodong Cun, Yong Zhang, Long Quan, and Ying Shan. Depthcrafter: Generating consistent long depth sequences for open-world videos. In CVPR, 2025. 3 +[18] Mustafa Işık, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 3 +[19] Muhammad Zubair Irshad, Mauro Comi, Yen-Chen Lin, Nick Heppert, Abhinav Valada, Rares Ambrus, Zsolt Kira, and Jonathan Tremblay. Neural fields in robotics: A survey. arXiv preprint arXiv: 2410.20220, 2024. 2 +[20] Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Scott Godisart, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social interaction capture. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017. 7, 8 +[21] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Dynamic stereo: Consistent dynamic depth from stereo videos. CVPR, 2023. 6, 2 +[22] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Co-tracker: It is better to track together. In Proc. ECCV, 2024. 3 +[23] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2, 3 +[24] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM TOG, 42(4):139-1, 2023. 3 +[25] Skanda Koppula, Ignacio Rocco, Yi Yang, Joe Heyward, João Carreira, Andrew Zisserman, Gabriel Brostow, and Carl Doersch. Tapvid-3d: A benchmark for tracking any point in 3d. arXiv preprint arXiv: 2407.05921, 2024. 7 +[26] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. European Conference on Computer Vision, 2024. 2, 6, 7, 8 +[27] Philipp Lindenberger, Paul-Edouard Sarlin, and Marc Pollefeys. Lightglue: Local feature matching at light speed. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17627-17638, 2023. 3 +[28] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: + +Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 3 +[29] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, and Marc Szafraniec et al. DINOv2: Learning robust visual features without supervision. Trans. Mach. Learn. Research, 2024. 3 +[30] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguere, and Cyril Stachniss. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 7855-7862. IEEE, 2019. 6, 7, 8 +[31] Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Carl Yuheng Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. arXiv preprint arXiv: 2306.06362, 2023. 7, 8 +[32] Rene Ranftl, Vibhav Vineet, Qifeng Chen, and Vladlen Koltun. Dense monocular depth estimation in complex dynamic scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4058-4066, 2016. 3 +[33] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF international conference on computer vision, pages 12179-12188, 2021. 5 +[34] Chris Russell, Rui Yu, and Lourdes Agapito. Video popuup: Monocular 3d reconstruction of dynamic scenes. In European conference on computer vision, pages 583-598. Springer, 2014. 3 +[35] Peter Sand and Seth Teller. Particle video: Long-range motion estimation using point trajectories. International journal of computer vision, 80:72-91, 2008. 3 +[36] Jiahao Shao, Yuanbo Yang, Hongyu Zhou, Youmin Zhang, Yujun Shen, Matteo Poggi, and Yiyi Liao. Learning temporally consistent video depth from video diffusion priors. abs/2406.01493, 2024. 3 +[37] Jürgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. 
A benchmark for the evaluation of RGB-D SLAM systems. pages 573-580, 2012. 7, 8 +[38] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part II 16, pages 402–419. Springer, 2020. 3 +[39] Zachary Teed and Jia Deng. Droid-slam: Deep visual slam for monocular, stereo, and rgb-d cameras. Neural Information Processing Systems, 2021. 2 +[40] Basile Van Hoorick, Rundi Wu, Ege Ozguroglu, Kyle Sargent, Ruoshi Liu, Pavel Tokmakov, Achal Dave, Changxi Zheng, and Carl Vondrick. Generative camera dolly: Extreme monocular dynamic novel view synthesis. arXiv preprint arXiv:2405.14868, 2024. 6, 2 +[41] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024. 2, 6 + +[42] Qianqian Wang, Vickie Ye, Hang Gao, Jake Austin, Zhengqi Li, and Angjoo Kanazawa. Shape of motion: 4d reconstruction from a single video. arXiv preprint arXiv:2407.13764, 2024. 2, 3 +[43] Qianqian Wang, Yifei Zhang, Aleksander Holynski, Alexei A. Efros, and Angjoo Kanazawa. Continuous 3d perception model with persistent state, 2025. 2, 6, 7, 8 +[44] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. DUSt3R: Geometric 3D vision made easy. In CVPR, pages 20697-20709, 2024. 2, 3, 4, 6, 7, 8 +[45] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian Scherer. TartanAir: A dataset to push the limits of visual SLAM. pages 4909-4916, 2020. 6, 2 +[46] Yihan Wang, Lahav Lipson, and Jia Deng. SEA-RAFT: Simple, efficient, accurate RAFT for optical flow. In ECCV, 2024. 1 +[47] Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. Spatialtracker: Tracking any 2d pixels in 3d space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2, 3, 7 +[48] Guangkai Xu, Yongtao Ge, Mingyu Liu, Chengxiang Fan, Kangyang Xie, Zhiyue Zhao, Hao Chen, and Chunhua Shen. Diffusion models trained with large data are transferable visual models. arXiv preprint arXiv: 2403.06090, 2024. 3 +[49] Yueming Xu, Haochen Jiang, Zhongyang Xiao, Jianfeng Feng, and Li Zhang. Dg-slam: Robust dynamic gaussian splatting slam with hybrid pose optimization. arXiv preprint arXiv: 2411.08373, 2024. 2 +[50] Honghui Yang, Di Huang, Wei Yin, Chunhua Shen, Haifeng Liu, Xiaofei He, Binbin Lin, Wanli Ouyang, and Tong He. Depth any video with scalable synthetic data. arXiv preprint arXiv:2410.10815, 2024. 3 +[51] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. arXiv:2406.09414, 2024. 2, 3 +[52] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. Monst3r: A simple approach for estimating geometry in the presence of motion. arXiv preprint arxiv:2410.03825, 2024. 2, 3, 6, 7, 8, 1 +[53] Guosheng Zhao, Chaojun Ni, Xiaofeng Wang, Zheng Zhu, Xueyang Zhang, Yida Wang, Guan Huang, Xinze Chen, Boyuan Wang, Youyi Zhang, Wenjun Mei, and Xingang Wang. Drivedreamer4d: World models are effective data machines for 4d driving scene representation. 2024. 2 +[54] Yang Zheng, Adam W. Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J. Guibas. PointOdyssey: A large-scale synthetic dataset for long-term point tracking. In ICCV, 2023. 
6, 7, 8, 2 + +# POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction + +Supplementary Material + +# A. Pointmap Matching for Global Alignment + +Given a sequence of video frames, the goal of global alignment is to project all pairwise estimated pointmaps into a single global world coordinate system. DUSt3R constructs a pairwise connectivity graph and globally minimizes the reprojection error for each image pair, which requires dynamic regions to be separated from static ones. To this end, MonST3R [52] introduces an auxiliary optical flow network [46] to mask dynamic regions and to provide pseudo 2D-matching labels for minimizing the reprojection error in static regions. However, this auxiliary model brings inevitable domain gaps and additional computational cost. Moreover, optical flow is tailored to matching between two adjacent frames and degrades noticeably under large view displacements. In POMATO, for an image pair $\{\mathbf{I}^i,\mathbf{I}^j\}$, the dynamic mask $\mathbf{D}^{j,i}$ is instead computed from the discrepancy between $\mathbf{X}^{j,i}$ and $\mathbf{X}_m^{j,i}$: + +$$ \mathbf{D}^{j,i} = \left\| \mathbf{X}_m^{j,i} - \mathbf{X}^{j,i} \right\| > \alpha, \tag{9} $$ + +where $\alpha$ is a dynamic threshold defined as $3 \times \mathrm{median}(\|\mathbf{X}_m^{j,i} - \mathbf{X}^{j,i}\|)$. + +Given the updated camera intrinsics $\tilde{\mathbf{K}}$ after each optimization iteration, the target 2D matching coordinates $\mathbf{F}_m^{j,i} \in \mathbb{R}^{H \times W \times 2}$ are computed as $\mathbf{F}_m^{j,i} = p(\tilde{\mathbf{K}}\mathbf{X}_m^{j,i})$, where $p$ maps 3D camera coordinates to 2D pixel coordinates. The optical flow loss proposed in MonST3R can thus be reformulated with our dynamic mask and 2D matching coordinates; we refer readers to MonST3R [52] for the details of this loss. A code sketch following Sec. D illustrates these two computations. + +# B. Fast 3D Reconstruction with Video POMATO + +Given a sequence of images shorter than the temporal window length of 12 frames, dynamic 3D reconstruction can be obtained by directly estimating the pointmaps of all reference images in the coordinate system of the key frame, as discussed in Sec. 3.4. Here, we provide more visualization results of this feed-forward mode and demonstrate the effectiveness of the temporal motion module. As shown in Fig. 8, directly applying pairwise reconstruction suffers from an obvious scale shift across frames; with the temporal motion module, consistency within the video sequence improves markedly. + +# C. Training Data Details + +The training datasets are detailed in Tab. 6. POMATO was finetuned exclusively on synthetic training datasets. + +# D. More Visualizations on Dynamic Scenes + +We provide more visualizations in Fig. 9 and Fig. 10. MonST3R degrades noticeably when the view displacement is large, as reflected by its erroneous pose estimates, whereas POMATO still recovers a consistent camera trajectory.
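To make Sec. A concrete, below is a minimal NumPy sketch of the dynamic-mask computation (Eq. 9) and the 2D matching-coordinate projection. The function names, the NumPy implementation, and the pinhole-projection details are our own illustrative assumptions rather than the released POMATO code; `X_m` and `X` stand for $\mathbf{X}_m^{j,i}$ and $\mathbf{X}^{j,i}$, and `K` for the updated intrinsics $\tilde{\mathbf{K}}$.

```python
import numpy as np

def dynamic_mask(X_m: np.ndarray, X: np.ndarray) -> np.ndarray:
    """Eq. 9: flag pixels where the matched pointmap disagrees with the
    rigidly transformed pointmap.

    X_m, X: (H, W, 3) pointmaps expressed in the same coordinate frame.
    Returns a boolean (H, W) mask; True marks dynamic pixels.
    """
    dist = np.linalg.norm(X_m - X, axis=-1)   # per-pixel 3D distance, (H, W)
    alpha = 3.0 * np.median(dist)             # adaptive threshold from Sec. A
    return dist > alpha

def matching_coords(X_m: np.ndarray, K: np.ndarray) -> np.ndarray:
    """Project the matched pointmap with intrinsics K to the 2D pixel
    targets F_m = p(K X_m) used by the modified optical-flow loss.

    Assumes a pinhole camera and points in front of it (positive depth).
    """
    cam = X_m @ K.T                           # apply K to every 3D point
    z = np.clip(cam[..., 2:3], 1e-8, None)    # guard against division by zero
    return cam[..., :2] / z                   # (H, W, 2) pixel coordinates

# Toy usage: a nearly static scene with hypothetical pointmaps.
H, W = 4, 5
rng = np.random.default_rng(0)
X = rng.normal(size=(H, W, 3)) + np.array([0.0, 0.0, 5.0])
X_m = X + 0.01 * rng.normal(size=(H, W, 3))
D = dynamic_mask(X_m, X)                      # mostly False here
F_m = matching_coords(X_m, np.eye(3))
```

In the global-alignment loop, both quantities would be recomputed after every update of the intrinsics and poses, and the reprojection (optical flow) loss would be evaluated only on pixels where the mask is False, i.e. the static regions.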
| Dataset | Domain | Scene Type | # of Frames | # of Scenes | Dynamics | Sample Ratio |
| --- | --- | --- | --- | --- | --- | --- |
| PointOdyssey [54] | Synthetic | Indoors & Outdoors | 200k | 131 | Realistic | 57.1% |
| TartanAir [45] | Synthetic | Indoors & Outdoors | 100k | 163 | None | 14.3% |
| DynamicReplica [21] | Synthetic | Indoors | 145k | 524 | Realistic | 14.3% |
| ParallelDomain4D [40] | Synthetic | Outdoors | 750k | 15015 | Driving | 8.6% |
| Carla [10] | Synthetic | Outdoors | 7k | 5 | Driving | 5.7% |

Table 6. An overview of all training datasets and their sample ratios. All datasets provide camera poses and depth, and most of them include dynamic objects. + +![](images/133a5be75fe9852f50d9dc75a3e28f6a3f8ed363159786f123db3e08abb2c86a.jpg) +Figure 8. Fast 3D reconstruction with our temporal motion module. Given a sequence of images shorter than the temporal window length, our POMATO can directly obtain a global pointmap in the key-frame coordinate system. + +![](images/33c6bad035555d5e4e9fdaf94cf6b764db4cf1f575afef326383c07bccb07da0.jpg) +Figure 9. Compared with MonST3R, our POMATO provides more complete dynamic masks and more consistent geometry. + +![](images/1553dd9fea27bacf18cff9e88695799cf7d92dd8244a549641fdbd79c3b38df9.jpg) +Figure 10. MonST3R degrades noticeably when the view displacement is large, as reflected by its erroneous pose estimates, whereas POMATO still provides a consistent camera trajectory. \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05692/images/22f3ec6d130b32d236544f7a36f8d34c068ee43491968d8b8252afd707b98ed1.jpg
b/data/2025/2504_05xxx/2504.05692/images/22f3ec6d130b32d236544f7a36f8d34c068ee43491968d8b8252afd707b98ed1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16bf0ac11d65934c958359f776b70637518d5bb5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/22f3ec6d130b32d236544f7a36f8d34c068ee43491968d8b8252afd707b98ed1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d79af7fedae8a3fa92f535d8bf37b9f110101c453d1b102d4bf43d2d507f2358 +size 10517 diff --git a/data/2025/2504_05xxx/2504.05692/images/2725a90e3c969c688b8b4aece0822e6dc29099b7a2ee22c8e885d7c14e48523e.jpg b/data/2025/2504_05xxx/2504.05692/images/2725a90e3c969c688b8b4aece0822e6dc29099b7a2ee22c8e885d7c14e48523e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d5e7798fcde4816ee1bf2df84fbd33526258624 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/2725a90e3c969c688b8b4aece0822e6dc29099b7a2ee22c8e885d7c14e48523e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcc1295c4c6c57382bfa90499e5c15327297dd09fec26d39ffd81828ba311612 +size 3936 diff --git a/data/2025/2504_05xxx/2504.05692/images/33c6bad035555d5e4e9fdaf94cf6b764db4cf1f575afef326383c07bccb07da0.jpg b/data/2025/2504_05xxx/2504.05692/images/33c6bad035555d5e4e9fdaf94cf6b764db4cf1f575afef326383c07bccb07da0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0f7fac394e6ae2ead72c5e65ecb9ea489f35ae1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/33c6bad035555d5e4e9fdaf94cf6b764db4cf1f575afef326383c07bccb07da0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b97307d585c9e260cd945da280f9a915fbdace2765c2c4bbedae72b8a6646922 +size 260718 diff --git a/data/2025/2504_05xxx/2504.05692/images/48af71f3300680b8034c43f62dca0f4717a244f56bdddfab423db451007292d7.jpg b/data/2025/2504_05xxx/2504.05692/images/48af71f3300680b8034c43f62dca0f4717a244f56bdddfab423db451007292d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..215488277b33d7d9081bf519cf62ccc51a323e4d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/48af71f3300680b8034c43f62dca0f4717a244f56bdddfab423db451007292d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8752a6fc46025acffeb78c05811018ac8c92f548ed53125bb8ea1922cd451a2 +size 48078 diff --git a/data/2025/2504_05xxx/2504.05692/images/48fc00ed0ccff1357aa20e08011e8b828d049473515dabed56bb834c4997282a.jpg b/data/2025/2504_05xxx/2504.05692/images/48fc00ed0ccff1357aa20e08011e8b828d049473515dabed56bb834c4997282a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e79f8199b571c9869d260315b585c6025d06a987 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/48fc00ed0ccff1357aa20e08011e8b828d049473515dabed56bb834c4997282a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:751335f51eb159ac35ccebc6cdf0136b2595b6c0649e48587b149e0abb39883a +size 5567 diff --git a/data/2025/2504_05xxx/2504.05692/images/5ae28c25f828dac5cf5304f158000a3d4b15aaa5c22767f9cce24e599bcd63ea.jpg b/data/2025/2504_05xxx/2504.05692/images/5ae28c25f828dac5cf5304f158000a3d4b15aaa5c22767f9cce24e599bcd63ea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..189147bf8677a7865d18d427a59d9907b18a23d8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/5ae28c25f828dac5cf5304f158000a3d4b15aaa5c22767f9cce24e599bcd63ea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:18a52f250f2c5331f300ba51c17755f7223ae317dc0a2325a15c58f6c4d1a342 +size 59500 diff --git a/data/2025/2504_05xxx/2504.05692/images/61e6296f3d781a21567857de4b2ea166d7a6b53edff37bc595f168c4d288e0ee.jpg b/data/2025/2504_05xxx/2504.05692/images/61e6296f3d781a21567857de4b2ea166d7a6b53edff37bc595f168c4d288e0ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3eab10de3d30b1d989822ce369f83265bdec4065 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/61e6296f3d781a21567857de4b2ea166d7a6b53edff37bc595f168c4d288e0ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6781f1d346bb7ba66167d52be4e1ab7268258c4a51dafd67611aa53e174dc2e0 +size 4541 diff --git a/data/2025/2504_05xxx/2504.05692/images/6a30322c438fc8178fdadc08700fa2c23601ca083bfd8c9601b0c19f2ade291e.jpg b/data/2025/2504_05xxx/2504.05692/images/6a30322c438fc8178fdadc08700fa2c23601ca083bfd8c9601b0c19f2ade291e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a2a66a7e6c9922657b1ab7608c54de66a2a4cd4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/6a30322c438fc8178fdadc08700fa2c23601ca083bfd8c9601b0c19f2ade291e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7363f7c4898703d3a0cc4ccadf37c2d830db0a104e6e8ebf67564abdd0510502 +size 46724 diff --git a/data/2025/2504_05xxx/2504.05692/images/77c2053e0882f10da0a9b7e380f417e6d9b57a1125876608e85426222f0b0d45.jpg b/data/2025/2504_05xxx/2504.05692/images/77c2053e0882f10da0a9b7e380f417e6d9b57a1125876608e85426222f0b0d45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa2c557ead09deb3becbafae7ec9a75dc657080e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/77c2053e0882f10da0a9b7e380f417e6d9b57a1125876608e85426222f0b0d45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff39d3ca14b3600c2a62d09085ebb4d1737788ede44a9f7f5754efc5e90ea141 +size 9501 diff --git a/data/2025/2504_05xxx/2504.05692/images/7e0a527c5d772d5170207369443f4492fcc8f881d8ee08801d58a55b0d28fec9.jpg b/data/2025/2504_05xxx/2504.05692/images/7e0a527c5d772d5170207369443f4492fcc8f881d8ee08801d58a55b0d28fec9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc0962a918f3fda04e3f5618b026b1b32f8a23ed --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/7e0a527c5d772d5170207369443f4492fcc8f881d8ee08801d58a55b0d28fec9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad7ca6e4a6cc49766080e54c3084ee22a299669852724b66c373bbdacb1678ea +size 3940 diff --git a/data/2025/2504_05xxx/2504.05692/images/7ef319305d817284a367ded55f397489a47ecfddf523efa5658896a2d17354bf.jpg b/data/2025/2504_05xxx/2504.05692/images/7ef319305d817284a367ded55f397489a47ecfddf523efa5658896a2d17354bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03be24e15f63f984bcfd472af76af98a8c2d5e38 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/7ef319305d817284a367ded55f397489a47ecfddf523efa5658896a2d17354bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8202e64aaac2858f19cce13d01af238974090f797e85df404603eb4febbb31b4 +size 53835 diff --git a/data/2025/2504_05xxx/2504.05692/images/80719c660669e2d285012f6663ec105a6137bf97e42c4107de2e9c454bfec2f8.jpg b/data/2025/2504_05xxx/2504.05692/images/80719c660669e2d285012f6663ec105a6137bf97e42c4107de2e9c454bfec2f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5fc00ad7f3cc9263cf1e4b0bf70b766bce74bce --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05692/images/80719c660669e2d285012f6663ec105a6137bf97e42c4107de2e9c454bfec2f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4af656fcfdbd570d2237e7916f8fb520aecd11383c6832d55c5a5a57bc13aa8b +size 18066 diff --git a/data/2025/2504_05xxx/2504.05692/images/80953ea0dc5ca9ee59f95c9853c3873761fadebe7ab58f819e72258d3f71d946.jpg b/data/2025/2504_05xxx/2504.05692/images/80953ea0dc5ca9ee59f95c9853c3873761fadebe7ab58f819e72258d3f71d946.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab4efa2aed6e69c4aaf39830d22d86d866533ec1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/80953ea0dc5ca9ee59f95c9853c3873761fadebe7ab58f819e72258d3f71d946.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d91bd59615386225254d852b30c5601879c203784310d315031db7bcf216c681 +size 9033 diff --git a/data/2025/2504_05xxx/2504.05692/images/8239ab20be4562800b2c68569d3d5ab2c42bca1f03dcdbe48b9eabdda64d41a0.jpg b/data/2025/2504_05xxx/2504.05692/images/8239ab20be4562800b2c68569d3d5ab2c42bca1f03dcdbe48b9eabdda64d41a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b01be8f263c6f61beadde9fff2c487deaf1e9d84 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/8239ab20be4562800b2c68569d3d5ab2c42bca1f03dcdbe48b9eabdda64d41a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40565fd35f1f21553ff3b07e44efa2cdfa0d6209622545eac3c1f998d93b5d10 +size 82404 diff --git a/data/2025/2504_05xxx/2504.05692/images/865c5f7f18ca194c59889f3437641b2c340f0dae47f7ba5904d226f77a02757d.jpg b/data/2025/2504_05xxx/2504.05692/images/865c5f7f18ca194c59889f3437641b2c340f0dae47f7ba5904d226f77a02757d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e9b1e26b1be8e8e5623519ff1b51399c583e5b2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/865c5f7f18ca194c59889f3437641b2c340f0dae47f7ba5904d226f77a02757d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4bf83b73892abad337247ae318281799a732376b0526d13d43dd574c7e87522 +size 30347 diff --git a/data/2025/2504_05xxx/2504.05692/images/98a876741c25930c1c2f4bfce1d1c178379109f82b67d15153cad3df741cec13.jpg b/data/2025/2504_05xxx/2504.05692/images/98a876741c25930c1c2f4bfce1d1c178379109f82b67d15153cad3df741cec13.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b1b23dd43f9b13c8a3787122e9c6952c5f70c92 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/98a876741c25930c1c2f4bfce1d1c178379109f82b67d15153cad3df741cec13.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e431361794fe4634a49a551237695951edbfae4c79024761f1d78cd5cffa5700 +size 6232 diff --git a/data/2025/2504_05xxx/2504.05692/images/a00bf8bcacaf47797f494012a320e2a40fdd47220aa6b3eb0c87cdcafa2a834a.jpg b/data/2025/2504_05xxx/2504.05692/images/a00bf8bcacaf47797f494012a320e2a40fdd47220aa6b3eb0c87cdcafa2a834a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acca3bd16d6593aef9d85fb990440e3068904d56 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/a00bf8bcacaf47797f494012a320e2a40fdd47220aa6b3eb0c87cdcafa2a834a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b3b43feb539431c4c495af7e7bffd0f128ee9333bbc7007c13048231f062e55 +size 21789 diff --git a/data/2025/2504_05xxx/2504.05692/images/a14c594c62cedd5e32d97e3ea1a176b458e13f8f8cdad1bfc378e63d14129e06.jpg 
b/data/2025/2504_05xxx/2504.05692/images/a14c594c62cedd5e32d97e3ea1a176b458e13f8f8cdad1bfc378e63d14129e06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b30ee716102a1b49072301d7d6a68ad4378ad8d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/a14c594c62cedd5e32d97e3ea1a176b458e13f8f8cdad1bfc378e63d14129e06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:011ca5ca18c1bc37641ffcd48a49e63e06ea4df83aae2739ca64252c5570a1fa +size 3771 diff --git a/data/2025/2504_05xxx/2504.05692/images/a572bf8f3f31147ee0b7fe7ce8470f549bc4ee48d2b9a7606bf4cb6e04f57753.jpg b/data/2025/2504_05xxx/2504.05692/images/a572bf8f3f31147ee0b7fe7ce8470f549bc4ee48d2b9a7606bf4cb6e04f57753.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0c47f03fbac30501efc5485a7466d9ae49f844d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/a572bf8f3f31147ee0b7fe7ce8470f549bc4ee48d2b9a7606bf4cb6e04f57753.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0590fface92dabc18c7d0573a7d8ee535c7a01f236777a7b596a4a6b1209abc1 +size 21601 diff --git a/data/2025/2504_05xxx/2504.05692/images/abf58e9ec06158973e58dbe4a60955efcc728a923015cf9b8a45e5acad73ffcb.jpg b/data/2025/2504_05xxx/2504.05692/images/abf58e9ec06158973e58dbe4a60955efcc728a923015cf9b8a45e5acad73ffcb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0624adb9f526afe6bf84231ddd4b9c2845b1243d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/abf58e9ec06158973e58dbe4a60955efcc728a923015cf9b8a45e5acad73ffcb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6725beeaba76ef4320aab17bf8877b1a6eac76088eff69c3a244a5d70f8ca6a +size 17241 diff --git a/data/2025/2504_05xxx/2504.05692/images/c01e277c9d6d6d62313b376a0e50c866e3c5aa3c837fecebdb575154bd8f9543.jpg b/data/2025/2504_05xxx/2504.05692/images/c01e277c9d6d6d62313b376a0e50c866e3c5aa3c837fecebdb575154bd8f9543.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b88eac6453ac965cf8be32934d5cb88722a04c6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/c01e277c9d6d6d62313b376a0e50c866e3c5aa3c837fecebdb575154bd8f9543.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d49279f30395bd08f4a71034cee5260f5bb0582432069711507928b3d427857 +size 6436 diff --git a/data/2025/2504_05xxx/2504.05692/images/d5e4b9b2b316a0891838b1d256865499a344168e46176581f1098b0fa64d8c42.jpg b/data/2025/2504_05xxx/2504.05692/images/d5e4b9b2b316a0891838b1d256865499a344168e46176581f1098b0fa64d8c42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..190b359e26bf9622612a61d9e01ea18a1bf30f94 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/d5e4b9b2b316a0891838b1d256865499a344168e46176581f1098b0fa64d8c42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08aa879a4ca90fecb8116881f757ad36f60d4b282788b06c3355dd894ef473a6 +size 29814 diff --git a/data/2025/2504_05xxx/2504.05692/images/d664dc6c171f8e6a159f2659a62cc7b36112bf928cff2e86cd189897267afe7a.jpg b/data/2025/2504_05xxx/2504.05692/images/d664dc6c171f8e6a159f2659a62cc7b36112bf928cff2e86cd189897267afe7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..920b05a0466fbcc56332c44c2583a59f9746a5c6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/d664dc6c171f8e6a159f2659a62cc7b36112bf928cff2e86cd189897267afe7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4eee4a0cd7cf8cd1f2a250e5f4dba7629c177842eef06d4c29082288b5518b19 +size 87368 diff --git a/data/2025/2504_05xxx/2504.05692/images/e2fe70253e4295dea812bf5a75af67c019b523b5a34fe2c4ad961025ba265b7c.jpg b/data/2025/2504_05xxx/2504.05692/images/e2fe70253e4295dea812bf5a75af67c019b523b5a34fe2c4ad961025ba265b7c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec00600825bf93f0984b4eabe00e42b928b07c24 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/e2fe70253e4295dea812bf5a75af67c019b523b5a34fe2c4ad961025ba265b7c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee8e5ceb47d69570668a7cada506f9254ba7ba0933b358762f8c139a0d6c3726 +size 31119 diff --git a/data/2025/2504_05xxx/2504.05692/images/e6d6b6d0d562795bdee50bef700038574cb99c8c3950f3ff6313b68af3401c0f.jpg b/data/2025/2504_05xxx/2504.05692/images/e6d6b6d0d562795bdee50bef700038574cb99c8c3950f3ff6313b68af3401c0f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ca6f68efba35eff73e684663dc6873164b0d67c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/e6d6b6d0d562795bdee50bef700038574cb99c8c3950f3ff6313b68af3401c0f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f27d2b0198e865972c5c7c3a8b155e7e54e586534b76a7ea363049c9b171eb8 +size 37090 diff --git a/data/2025/2504_05xxx/2504.05692/images/f5f28d1c790be8bf149f762f29cd56a6fb328dbe358471623ca58d55907612bc.jpg b/data/2025/2504_05xxx/2504.05692/images/f5f28d1c790be8bf149f762f29cd56a6fb328dbe358471623ca58d55907612bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bfbd2038049b7004214b9c39f19bd9520f00a24 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/f5f28d1c790be8bf149f762f29cd56a6fb328dbe358471623ca58d55907612bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59d154c969a0713529b9831282edf79fbf9da978e2a4e063332ccf637bfbdd0f +size 46752 diff --git a/data/2025/2504_05xxx/2504.05692/images/f80df26c4822f115831822b26fcd2f96867473824b7b633de27b28e6f007792b.jpg b/data/2025/2504_05xxx/2504.05692/images/f80df26c4822f115831822b26fcd2f96867473824b7b633de27b28e6f007792b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d429e9af260f260669696cf52a7100e12422aad --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/f80df26c4822f115831822b26fcd2f96867473824b7b633de27b28e6f007792b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5024b00bc5861a1dfb9dfba86643677852eb7d85747ac8576f2db51831ab1e3 +size 9047 diff --git a/data/2025/2504_05xxx/2504.05692/images/f983987587b8b2a5201e5774573c13907336c1b5f24633e99ad0d6043e4236dd.jpg b/data/2025/2504_05xxx/2504.05692/images/f983987587b8b2a5201e5774573c13907336c1b5f24633e99ad0d6043e4236dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05811e05da22200f14f8231af8638dd1890ec1c1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/f983987587b8b2a5201e5774573c13907336c1b5f24633e99ad0d6043e4236dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a97cd1d8784f88c880fd9918264e0979aac9f66e0026a7fc3434e17768c05d35 +size 5209 diff --git a/data/2025/2504_05xxx/2504.05692/images/fe0b1e2fbadeaa2594c8eb30df08a6fe10394011cc93cc13223e9ca718be9a53.jpg b/data/2025/2504_05xxx/2504.05692/images/fe0b1e2fbadeaa2594c8eb30df08a6fe10394011cc93cc13223e9ca718be9a53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6462fde4c90ce495990bb3ddde9401b3a70cc770 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05692/images/fe0b1e2fbadeaa2594c8eb30df08a6fe10394011cc93cc13223e9ca718be9a53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd052337d19780813be9751515274efdab1da968ceeb9210a02adaae11e557f8 +size 4635 diff --git a/data/2025/2504_05xxx/2504.05692/images/ffa2a8e80de9dcbc736d37d3eb5d1989e36111cb092a54c8fa4c690e19d35a0a.jpg b/data/2025/2504_05xxx/2504.05692/images/ffa2a8e80de9dcbc736d37d3eb5d1989e36111cb092a54c8fa4c690e19d35a0a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9debb9fc53b58c09e3095899e9dcd30c0796688a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/images/ffa2a8e80de9dcbc736d37d3eb5d1989e36111cb092a54c8fa4c690e19d35a0a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82d0ce015f6f070655dd001dc8a88a5fe42d59f47dd1f80d309fabcf84991125 +size 131316 diff --git a/data/2025/2504_05xxx/2504.05692/layout.json b/data/2025/2504_05xxx/2504.05692/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..173376b665add3026bc9a679950d18a77ee3767f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05692/layout.json @@ -0,0 +1,9268 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 102, + 102, + 509, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 102, + 509, + 138 + ], + "spans": [ + { + "bbox": [ + 102, + 102, + 509, + 138 + ], + "type": "text", + "content": "POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "spans": [ + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "text", + "content": "Songyan Zhang" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "text", + "content": " Yongtao Ge" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{2,3*}" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "text", + "content": " Jinyuan Tian" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "text", + "content": " Guangkai Xu" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "text", + "content": " Hao Chen" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{2\\boxtimes}" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "text", + "content": " Chen Lv" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "text", + "content": " Chunhua Shen" + }, + { + "bbox": [ + 131, + 159, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 114, + 194, + 497, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 194, + 497, + 224 + ], + "spans": [ + { + "bbox": [ + 114, + 194, + 497, + 224 + ], + "type": "text", + "content": "1Nanyang Technological University, Singapore 2Zhejiang University, China 3The University of Adelaide, Australia" + } + ] + } + ], + "index": 3 + 
}, + { + "type": "image", + "bbox": [ + 58, + 254, + 122, + 459 + ], + "blocks": [ + { + "bbox": [ + 58, + 254, + 122, + 459 + ], + "lines": [ + { + "bbox": [ + 58, + 254, + 122, + 459 + ], + "spans": [ + { + "bbox": [ + 58, + 254, + 122, + 459 + ], + "type": "image", + "image_path": "2038d457d38fd0c221b12bdad5e2a377292a6a5391edc6521c9a1a933405292f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 470, + 555, + 492 + ], + "lines": [ + { + "bbox": [ + 55, + 470, + 555, + 492 + ], + "spans": [ + { + "bbox": [ + 55, + 470, + 555, + 492 + ], + "type": "text", + "content": "Figure 1. 3D reconstruction from an arbitrary dynamic video with POMATO. Without relying on external modules, POMATO can directly perform 3D reconstruction along with temporal 3D point tracking and dynamic mask estimation." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 126, + 257, + 373, + 459 + ], + "blocks": [ + { + "bbox": [ + 126, + 257, + 373, + 459 + ], + "lines": [ + { + "bbox": [ + 126, + 257, + 373, + 459 + ], + "spans": [ + { + "bbox": [ + 126, + 257, + 373, + 459 + ], + "type": "image", + "image_path": "7ef319305d817284a367ded55f397489a47ecfddf523efa5658896a2d17354bf.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 376, + 258, + 547, + 456 + ], + "blocks": [ + { + "bbox": [ + 376, + 258, + 547, + 456 + ], + "lines": [ + { + "bbox": [ + 376, + 258, + 547, + 456 + ], + "spans": [ + { + "bbox": [ + 376, + 258, + 547, + 456 + ], + "type": "image", + "image_path": "17b51d4a6a4dec36c51e858d07bf3a17155c0a4b6326adbd345b3eed1d1b4ef0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 151, + 502, + 200, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 502, + 200, + 514 + ], + "spans": [ + { + "bbox": [ + 151, + 502, + 200, + 514 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 54, + 528, + 295, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 528, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 54, + 528, + 295, + 694 + ], + "type": "text", + "content": "Recent approaches to 3D reconstruction in dynamic scenes primarily rely on the integration of separate geometry estimation and matching modules, where the latter plays a critical role in distinguishing dynamic regions and mitigating the interference caused by moving objects. Furthermore, the matching module explicitly models object motion, enabling the tracking of specific targets and advancing motion understanding in complex scenarios. Recently, the proposed representation of pointmap in DUSt3R suggests a potential solution to unify both geometry estimation and matching in 3D space, effectively reducing computational overhead by eliminating the need for redundant auxiliary modules. However, it still struggles with ambiguous correspondences in dynamic regions, which limits reconstruction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 504, + 555, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 504, + 555, + 694 + ], + "spans": [ + { + "bbox": [ + 313, + 504, + 555, + 694 + ], + "type": "text", + "content": "performance in such scenarios. 
In this work, we present POMATO, a unified framework for dynamic 3D reconstruction by marrying POintmap MAtching with Temporal mOtion. Specifically, our method first learns an explicit matching relationship by mapping RGB pixels across different views to 3D pointmaps within a unified coordinate system. Furthermore, we introduce a temporal motion module for dynamic motions that ensures scale consistency across different frames and enhances performance in 3D reconstruction tasks requiring both precise geometry and reliable matching, most notably 3D point tracking. We show the effectiveness of our proposed POMATO by demonstrating the remarkable performance across multiple downstream tasks, including video depth estimation, 3D point tracking, and pose estimation. Code and models are publicly available at https://github.com/wyddmw/POMATO." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 217, + 37, + 572 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 217, + 37, + 572 + ], + "spans": [ + { + "bbox": [ + 14, + 217, + 37, + 572 + ], + "type": "text", + "content": "arXiv:2504.05692v2 [eess.IV] 8 Aug 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 702, + 222, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 702, + 222, + 712 + ], + "spans": [ + { + "bbox": [ + 70, + 702, + 222, + 712 + ], + "type": "text", + "content": "* Equal contribution. " + }, + { + "bbox": [ + 70, + 702, + 222, + 712 + ], + "type": "inline_equation", + "content": "\\boxdot" + }, + { + "bbox": [ + 70, + 702, + 222, + 712 + ], + "type": "text", + "content": " Corresponding author." + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 101, + 74, + 240, + 180 + ], + "blocks": [ + { + "bbox": [ + 61, + 95, + 97, + 107 + ], + "lines": [ + { + "bbox": [ + 61, + 95, + 97, + 107 + ], + "spans": [ + { + "bbox": [ + 61, + 95, + 97, + 107 + ], + "type": "text", + "content": "Image1" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 101, + 74, + 240, + 180 + ], + "lines": [ + { + "bbox": [ + 101, + 74, + 240, + 180 + ], + "spans": [ + { + "bbox": [ + 101, + 74, + 240, + 180 + ], + "type": "image", + "image_path": "a572bf8f3f31147ee0b7fe7ce8470f549bc4ee48d2b9a7606bf4cb6e04f57753.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 249, + 71, + 465, + 188 + ], + "blocks": [ + { + "bbox": [ + 249, + 71, + 465, + 188 + ], + "lines": [ + { + "bbox": [ + 249, + 71, + 465, + 188 + ], + "spans": [ + { + "bbox": [ + 249, + 71, + 465, + 188 + ], + "type": "image", + "image_path": "865c5f7f18ca194c59889f3437641b2c340f0dae47f7ba5904d226f77a02757d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 198, + 555, + 243 + ], + "lines": [ + { + "bbox": [ + 54, + 198, + 555, + 243 + ], + "spans": [ + { + "bbox": [ + 54, + 198, + 555, + 243 + ], + "type": "text", + "content": "Figure 2. Ambiguity in 3D point matching in dynamic scenes with DUSt3R. Given representative corresponding pixels of background (orange) and moving foreground (red) in two different views, DUSt3R outputs a pair of 3D points within the same coordinate system. 
In static regions, identical pixels share the same 3D coordinates which provide an accurate matching relationship in 3D space, but in moving regions, the 3D coordinates are inconsistent for corresponding pixels across views, leading to ambiguous 3D matching relationships." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 466, + 74, + 550, + 180 + ], + "blocks": [ + { + "bbox": [ + 466, + 74, + 550, + 180 + ], + "lines": [ + { + "bbox": [ + 466, + 74, + 550, + 180 + ], + "spans": [ + { + "bbox": [ + 466, + 74, + 550, + 180 + ], + "type": "image", + "image_path": "22f3ec6d130b32d236544f7a36f8d34c068ee43491968d8b8252afd707b98ed1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 251, + 135, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 251, + 135, + 262 + ], + "spans": [ + { + "bbox": [ + 55, + 251, + 135, + 262 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 275, + 296, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 275, + 296, + 562 + ], + "spans": [ + { + "bbox": [ + 56, + 275, + 296, + 562 + ], + "type": "text", + "content": "Image-based 3D reconstruction is a fundamental task in computer vision with a wide range of applications including SLAM [39], robotics [19, 49], autonomous driving [53], and novel view synthesis [5]. While substantial progress has been achieved in static 3D reconstruction [16, 23, 26, 44, 51], dynamic scenes remain a major hurdle due to complexities like non-rigid motion and deformation, which may hamper the learning of local structure and camera motion, thereby complicating accurate 3D reconstruction for dynamic scenes. These scenarios require explicit modeling of both scene geometry and object motion. Moreover, downstream reconstruction tasks, such as 3D point tracking, demand precise geometry estimation and robust matching across views. To effectively distinguish dynamic regions, it is essential to establish reliable correspondences between different frames. Some pioneering works have attempted to address dynamic motion by incorporating additional auxiliary matching modules, such as optical flow [42, 52] or 2D tracking [47]. However, these approaches may suffer from domain gaps and accumulated errors between modules, limiting their effectiveness. A unified framework that seamlessly integrates geometry estimation and matching for dynamic 3D reconstruction remains a critical and underexplored challenge." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 566, + 295, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 566, + 295, + 685 + ], + "spans": [ + { + "bbox": [ + 55, + 566, + 295, + 685 + ], + "type": "text", + "content": "Recently, DUSt3R [44] proposes a promising solution to address this challenge. It introduces the concept of a pointmap that assigns each pixel in an image to a corresponding 3D coordinate. The network utilizes a standard transformer-based encoder-decoder architecture and receives a pair of images as input. The system incorporates two parallel decoders to predict pointmaps for each view within the same coordinate system. However, this representation is limited to static matching and struggles in dynamic scenes, as illustrated in Fig. 2." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "content": "To address this problem, we present POMATO, a unified network for dynamic 3D reconstruction by marrying" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 251, + 555, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 251, + 555, + 610 + ], + "spans": [ + { + "bbox": [ + 313, + 251, + 555, + 610 + ], + "type": "text", + "content": "POintmap MMatching with Temporal mOtion. We argue that with iterative cross-attention modules across different views, matching features are well preserved in the decoder tokens. We thus introduce an auxiliary pointmap matching head to learn explicit correspondences. Specifically, for each pixel in the second view, the pointmap matching head predicts the corresponding 3D coordinates of its counterpart in the first view, under the shared coordinate system. Our proposed pointmap-based matching representation enables the establishment of explicit correspondences in 3D space, which can be directly leveraged for motion analysis, especially the estimation of dynamic regions. Moreover, we further extend our POMATO to handle 4D video sequences by introducing a temporal motion module that enhances the learning of temporal motions. This motion module promotes scale consistency across different frames and improves performance in tasks where both accurate geometry and reliable matching are paramount, most notably 3D point tracking. Compared with recent temporal 3D reconstruction methods [41, 43] based on an autoregression manner where the previous frames are blocked from the recently added frames, our temporal motion module is based on the self-attention mechanism along the temporal dimension, facilitating a comprehensive interaction across all frames. Our POMATO is trained in a two-stage manner. In the first stage, we used pairwise input images to learn fundamental geometry and matching capabilities. In the second stage, we extend the input to sequential video frames and incorporate the temporal motion module, enabling the model to effectively capture motions over time." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 617, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 556, + 714 + ], + "type": "text", + "content": "Our contributions can be summarized in threefold: First, we propose a novel approach that unifies the fundamental geometry estimation and motion understanding for dynamic 3D reconstruction into a single network by incorporating the representation of pointmap matching. Second, we introduce a temporal motion module to facilitate the interactions of motion features along the temporal dimension, which significantly improves the performance in tasks" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "type": "text", + "content": "where both accurate geometry and precise matching are required for video sequential input, most notably 3D point tracking. 
Third, we demonstrate promising performance on 3D vision tasks, including video depth estimation, 3D point tracking, and camera pose estimation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 143, + 142, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 143, + 142, + 156 + ], + "spans": [ + { + "bbox": [ + 55, + 143, + 142, + 156 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 163, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 163, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 163, + 296, + 715 + ], + "type": "text", + "content": "Geometry estimation refers to the process of determining the spatial properties and structures from different forms of visual data. Direct recovery of 3D geometry from a single RGB image is by nature an ill-posed problem. Many recent works [3, 16, 23, 51] have tried to leverage strong pre-trained models to learn generalizable depthmaps from large-scale real and synthetic datasets to solve ambiguities. For example, Marigold [23], Geowizard [11], and GenPercept [48] aim at leveraging the generative priors from pre-trained diffusion models by finetuning them on synthetic datasets. Depthanything V2 [51] proposes to estimate scale-and-shift invariant disparity map by finetuning DINOV2 [29] model on synthetic datasets and largescale pseudo labels. Depth Pro [3] further propose a FOV head to estimate the metric depthmap from a single image without relying on camera intrinsics as input. Due to the scale ambiguity in the monocular depth estimation models, ChronoDepth [36], DepthCrafter [17], and Depth-any-video [50] proposes to learn temporal consistent depthmaps by leveraging the priors from a video generative model, i.e. SVD [2]. In another line of the research, multi-view stereo reconstruction (MVS) methods seek to reconstruct visible surfaces from multiple viewpoints. Traditional MVS [12] and SfM pipelines break the reconstruction pipeline into several sub-problems, e.g., feature extraction [8], image matching [1, 27], triangulation, and bundle adjustment [7]. The chain is complicated and accumulates noise for every single step, thus often resulting in unsatisfactory performance in complex real-world scenes. Recognizing the limitations of previous MVS methods, seminal work DUSt3R [44] proposes 3D pointmaps representation, and trains a network from large-scale data to regress the dense and accurate pointmaps from a pair of images. The camera intrinsics and relative camera poses can be implicitly inferred from the two-view pointmaps. However, it still can not handle reconstruction for dynamic scenes. MonST3R [52] directly finetuned the original DUSt3R model upon synthetic datasets that contain dynamic scenes. Motion representation. Optical flow is a commonly used representation for 2D motion. RAFT [38] is a representative work for pairwise optical flow estimation, which employs a 4D cost volume and recurrently estimates the optical flow. Some follow-up methods further extend it to multi-frame (3-5 frames) settings, which is still insufficient for long-range tracking. To resolve the problem, Particle Video [35] represent video motion by using a set of particles. 
Each" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 313, + 72, + 553, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 227 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 227 + ], + "type": "text", + "content": "particle is an image point sample with a long-duration trajectory and other properties. Particle videos have two key advantages over optical flow: (1) persistence through occlusions, and (2) multi-frame temporal context. Some recent works, PIPs [15], TAPIR [9] and Cotracker [22] have renewed interest in this representation and show promising long-term 2D point tracking results. Recognizing the advantage of point representation, SpatialTracker [47] lifts the 2D points into 3D and performs tracking in the 3D space. Though it can handle occlusions and enhance 3D tracking accuracy, it still relies on a separate monocular depth estimator, which prevents it performing 3D point tracking in an end-to-end fashion." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 555, + 407 + ], + "type": "text", + "content": "Multi-view dynamic reconstruction. Our work is closely connected to multi-view dynamic 3D reconstruction techniques. Early works [32, 34] take the straightforward idea that first pre-segment the scene into different regions, each corresponding to a single rigid part of an object, then apply the rigid-SfM technique to each of the regions. Some of the recent Neural Radiance Fields (NeRF) [28] and Gaussian Splatting [24] based methods have achieved state-of-the-art results. However, most of these methods require simultaneous multi-view video inputs or require predefined templates [18]. Shape of motion [42], proposes a new dynamic scene representation to represent the dynamic scene as a set of persistent 3D Gaussians, and optimize the representation from a monocular video by leveraging monocular depth estimation priors and 2D track estimates across frames." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 314, + 417, + 370, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 417, + 370, + 430 + ], + "spans": [ + { + "bbox": [ + 314, + 417, + 370, + 430 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 437, + 394, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 437, + 394, + 449 + ], + "spans": [ + { + "bbox": [ + 313, + 437, + 394, + 449 + ], + "type": "text", + "content": "3.1. Preliminary" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "spans": [ + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "content": "The overview of our POMATO is demonstrated in Fig.3. We adopt the definition of pointmap " + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "content": " in DUSt3R [44] as a dense 2D field of 3D points where each point corresponds to its respective RGB pixel. 
Given a pair of input images " + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^1, \\mathbf{I}^2 \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "content": " from two different views, a weight-sharing ViT first extracts the corresponding features " + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "inline_equation", + "content": "\\mathbf{F}^1, \\mathbf{F}^2" + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "content": " for each view. Two parallel branches are employed to decode the geometric structures and enhance the feature alignment via cross-attention in decoder modules, following a regression head to estimate pointmaps " + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{1,1}, \\mathbf{X}^{2,1} \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "content": " along with a confidence map " + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "inline_equation", + "content": "\\mathbf{C}^{1,1}, \\mathbf{C}^{2,1} \\in \\mathbb{R}^{H \\times W}" + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "content": " for each image view. Generally, " + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{n,m}" + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "content": " indicates the pointmap " + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^n" + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "content": " from camera " + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "content": " expressed in camera " + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 313, + 454, + 555, + 634 + ], + "type": "text", + "content": "'s coordinate frame, which is obtained by a rigid transformation:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 381, + 654, + 553, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 654, + 553, + 669 + ], + "spans": [ + { + "bbox": [ + 381, + 654, + 553, + 669 + ], + "type": "interline_equation", + "content": "\\mathbf {X} ^ {n, m} = \\mathbf {P} _ {m} \\mathbf {P} _ {n} ^ {- 1} h \\left(\\mathbf {X} ^ {n}\\right), \\tag {1}", + "image_path": "a14c594c62cedd5e32d97e3ea1a176b458e13f8f8cdad1bfc378e63d14129e06.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_m, \\mathbf{P}_n \\in \\mathbb{R}^{3 \\times 4}" + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "content": " are world-to-camera poses for camera " + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "content": " and camera " + }, + { + "bbox": [ + 
313, + 677, + 555, + 715 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "content": ", respectively, and " + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "inline_equation", + "content": "h(\\mathbf{X}^n)" + }, + { + "bbox": [ + 313, + 677, + 555, + 715 + ], + "type": "text", + "content": " is a homogeneous mapping for the 3D coordinate in camera coordinate" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 70, + 328, + 189 + ], + "blocks": [ + { + "bbox": [ + 59, + 70, + 328, + 189 + ], + "lines": [ + { + "bbox": [ + 59, + 70, + 328, + 189 + ], + "spans": [ + { + "bbox": [ + 59, + 70, + 328, + 189 + ], + "type": "image", + "image_path": "6a30322c438fc8178fdadc08700fa2c23601ca083bfd8c9601b0c19f2ade291e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 197, + 555, + 242 + ], + "lines": [ + { + "bbox": [ + 54, + 197, + 555, + 242 + ], + "spans": [ + { + "bbox": [ + 54, + 197, + 555, + 242 + ], + "type": "text", + "content": "Figure 3. Overview of our training pipeline. (1) Stage I: build upon DUSt3R [44] architecture, we introduce a third regression point-matching head: " + }, + { + "bbox": [ + 54, + 197, + 555, + 242 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_3" + }, + { + "bbox": [ + 54, + 197, + 555, + 242 + ], + "type": "text", + "content": ", which is in parallel to " + }, + { + "bbox": [ + 54, + 197, + 555, + 242 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 54, + 197, + 555, + 242 + ], + "type": "text", + "content": " for explicit pointmap matching in 3D space. For each pixel in the second view, the output pointmap coordinate is the 3D point map of the corresponding pixel in the first view. (2) Stage II: we introduce a temporal fusion module in three heads that enables multi-style sequential input for learning temporal motions." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 328, + 70, + 552, + 189 + ], + "blocks": [ + { + "bbox": [ + 328, + 70, + 552, + 189 + ], + "lines": [ + { + "bbox": [ + 328, + 70, + 552, + 189 + ], + "spans": [ + { + "bbox": [ + 328, + 70, + 552, + 189 + ], + "type": "image", + "image_path": "e6d6b6d0d562795bdee50bef700038574cb99c8c3950f3ff6313b68af3401c0f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 251, + 109, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 251, + 109, + 262 + ], + "spans": [ + { + "bbox": [ + 55, + 251, + 109, + 262 + ], + "type": "text", + "content": "of camera " + }, + { + "bbox": [ + 55, + 251, + 109, + 262 + ], + "type": "inline_equation", + "content": "n" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 262, + 296, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 262, + 296, + 358 + ], + "spans": [ + { + "bbox": [ + 55, + 262, + 296, + 358 + ], + "type": "text", + "content": "The task for Decoder 1 and its regression head estimate the 3D points for " + }, + { + "bbox": [ + 55, + 262, + 296, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^1" + }, + { + "bbox": [ + 55, + 262, + 296, + 358 + ], + "type": "text", + "content": " in its own coordinate system while Decoder 2 and its regression head are responsible for estimating pixel-wise 3D coordinates for " + }, + { + "bbox": [ + 55, + 262, + 296, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^2" + }, + { + "bbox": [ + 55, + 262, + 296, + 358 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 55, + 262, + 296, + 358 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^1" + }, + { + "bbox": [ + 55, + 262, + 296, + 358 + ], + "type": "text", + "content": "'s coordinate system after a rigid transformation of global rotation and translation. In the following contents, we will first introduce our POMATO with pairwise input images and then extend it to the video sequence input with our temporal motion module." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 364, + 267, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 364, + 267, + 376 + ], + "spans": [ + { + "bbox": [ + 55, + 364, + 267, + 376 + ], + "type": "text", + "content": "3.2. Pointmap Matching with Pairwise Input" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "spans": [ + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": "As discussed before, the definition of " + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{2,1}" + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": " depicts a rigid camera transformation that is ambiguous to reflect explicit matching relationships for dynamic regions. 
To tackle this, we propose to formulate an explicit pointmap matching " + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_m^{2,1} \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": " that maps dense RGB pixels of " + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^2" + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": " to 3D coordinates of corresponding pixels in " + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^1" + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": " under the first image's coordinate system. Given a 2D query pixel at " + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "inline_equation", + "content": "(x_2, y_2)" + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^2" + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": " and its corresponding pixel at " + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "inline_equation", + "content": "(x_1, y_1)" + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^1" + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": ", the matched pointmap at " + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "inline_equation", + "content": "(x_2, y_2)" + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^2" + }, + { + "bbox": [ + 55, + 381, + 296, + 490 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 114, + 493, + 294, + 507 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 493, + 294, + 507 + ], + "spans": [ + { + "bbox": [ + 114, + 493, + 294, + 507 + ], + "type": "interline_equation", + "content": "\\mathbf {X} _ {m} ^ {2, 1} \\left(x _ {2}, y _ {2}\\right) = \\mathbf {X} ^ {1, 1} \\left(x _ {1}, y _ {1}\\right), \\tag {2}", + "image_path": "fe0b1e2fbadeaa2594c8eb30df08a6fe10394011cc93cc13223e9ca718be9a53.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "spans": [ + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "inline_equation", + "content": "(x,y)" + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "text", + "content": " denotes the coordinates of the 2D grid. For the representative dynamic point (red) in Fig. 2, the pointmap matching result is the 3D coordinate of point A in the coordinate system of the first image. As shown in Fig. 
3, " + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_m^{2,1}" + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{1,1}" + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "text", + "content": " are supposed to match perfectly in 3D space, provided that occluded regions are neglected. We argue that the set of decoder tokens from the second branch preserves abundant matching information with iterative cross-attentions, so we introduce a matching head with the same architecture as " + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_1" + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "text", + "content": ". The supervision for pointmap matching " + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_m^{2,1}" + }, + { + "bbox": [ + 55, + 511, + 296, + 654 + ], + "type": "text", + "content": " still follows the 3D regression loss, which is defined as the Euclidean distance:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 663, + 294, + 689 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 663, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 113, + 663, + 294, + 689 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {m}} = \\left\\| \\frac {1}{z _ {m}} \\mathbf {X} _ {m} ^ {2, 1} - \\frac {1}{\\bar {z} _ {m}} \\bar {\\mathbf {X}} _ {m} ^ {2, 1} \\right\\|, \\tag {3}", + "image_path": "f983987587b8b2a5201e5774573c13907336c1b5f24633e99ad0d6043e4236dd.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\bar{\\mathbf{X}}_m^{2,1}" + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": " is the ground truth pointmap matching, which can be obtained following Eq. 2 on the 2D tracking dataset" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "spans": [ + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "type": "text", + "content": "with the depth and camera information. " + }, + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "type": "inline_equation", + "content": "z_{m},\\bar{z}_{m}" + }, + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "type": "text", + "content": " are the normalization factors defined in DUSt3R. 
The matching confidence " + }, + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_m^{2,1}" + }, + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "type": "text", + "content": " is also learned following the confidence loss for " + }, + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_1" + }, + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 313, + 251, + 555, + 300 + ], + "type": "text", + "content": " within valid regions:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 372, + 309, + 553, + 324 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 309, + 553, + 324 + ], + "spans": [ + { + "bbox": [ + 372, + 309, + 553, + 324 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {m c o n f}} = \\mathbf {C} _ {m} ^ {2, 1} \\mathcal {L} _ {\\mathrm {m}} - \\alpha \\log \\mathbf {C} _ {m} ^ {2, 1} \\tag {4}", + "image_path": "61e6296f3d781a21567857de4b2ea166d7a6b53edff37bc595f168c4d288e0ee.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "spans": [ + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "type": "text", + "content": "The final loss " + }, + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "type": "text", + "content": " of our POMATO for pairwise input is a combination of the predefined DUSt3R loss " + }, + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{DUSt3R}}" + }, + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "type": "text", + "content": ", matching loss " + }, + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{m}}" + }, + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "type": "text", + "content": ", and matching confidence loss " + }, + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{mconf}}" + }, + { + "bbox": [ + 313, + 325, + 554, + 385 + ], + "type": "text", + "content": ". When training our POMATO on pairwise input images in the first stage, the parameters in the encoder are frozen." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 392, + 462, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 392, + 462, + 403 + ], + "spans": [ + { + "bbox": [ + 313, + 392, + 462, + 403 + ], + "type": "text", + "content": "3.3. Dynamic Mask Estimation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "spans": [ + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "text", + "content": "Taking advantage of the explicit pointmap matching head, our POMATO can directly perform dynamic mask estimation without introducing an auxiliary module such as an optical flow model, avoiding the additional computation cost and the potential domain gap. 
For an image pair " + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}^i,\\mathbf{I}^j\\}" + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "text", + "content": " along with the estimation of " + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{j,i}" + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{m}^{j,i}" + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_3" + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "text", + "content": ", the dynamic mask " + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{D}^{j,i}" + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "text", + "content": " can be obtained by comparing the difference between " + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{j,i}" + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{m}^{j,i}" + }, + { + "bbox": [ + 313, + 409, + 555, + 506 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 376, + 512, + 553, + 526 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 512, + 553, + 526 + ], + "spans": [ + { + "bbox": [ + 376, + 512, + 553, + 526 + ], + "type": "interline_equation", + "content": "\\mathbf {D} ^ {j, i} = \\left| \\left| \\mathbf {X} _ {m} ^ {j, i} - \\mathbf {X} ^ {j, i} \\right| \\right| > \\alpha , \\tag {5}", + "image_path": "2725a90e3c969c688b8b4aece0822e6dc29099b7a2ee22c8e885d7c14e48523e.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 533, + 555, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 533, + 555, + 617 + ], + "spans": [ + { + "bbox": [ + 313, + 533, + 555, + 617 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 533, + 555, + 617 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 533, + 555, + 617 + ], + "type": "text", + "content": " is a dynamic threshold defined as " + }, + { + "bbox": [ + 313, + 533, + 555, + 617 + ], + "type": "inline_equation", + "content": "3 \\times" + }, + { + "bbox": [ + 313, + 533, + 555, + 617 + ], + "type": "text", + "content": " median " + }, + { + "bbox": [ + 313, + 533, + 555, + 617 + ], + "type": "inline_equation", + "content": "(\\|\\mathbf{X}_m^{j,i} - \\mathbf{X}^{j,i}\\|)" + }, + { + "bbox": [ + 313, + 533, + 555, + 617 + ], + "type": "text", + "content": ". The explicit dynamic mask can be incorporated into the global alignment process to minimize the interference of moving objects for pose estimation and 3D reconstruction. Details on the incorporation of dynamic masks for global alignment are provided in the supplementary materials." 
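As a concrete reading of Eq. 5, the following minimal NumPy sketch computes the dynamic mask from the two pointmaps; the function name, array shapes, and input conventions are our own illustrative assumptions, not the released implementation.

```python
import numpy as np

def dynamic_mask(X_m: np.ndarray, X: np.ndarray) -> np.ndarray:
    """Sketch of Eq. 5: flag pixels where the matched pointmap (from Head_3)
    and the rigid-transform pointmap (from Head_2) disagree by more than
    alpha = 3 * median distance. Shapes (H, W, 3) are assumed."""
    dist = np.linalg.norm(X_m - X, axis=-1)   # per-pixel Euclidean distance, (H, W)
    alpha = 3.0 * np.median(dist)             # adaptive threshold from Eq. 5
    return dist > alpha                       # boolean dynamic mask D
```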
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 624, + 458, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 624, + 458, + 636 + ], + "spans": [ + { + "bbox": [ + 313, + 624, + 458, + 636 + ], + "type": "text", + "content": "3.4. Temporal Motion Module" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 641, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 641, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 641, + 554, + 713 + ], + "type": "text", + "content": "With the fundamental capability of geometric estimation and pointmap matching for pairwise images, we follow [6] and extend our POMATO to 4D video sequences by inserting a transformer-based motion module into the vanilla DPT head to construct the "temporal DPT head", which is illustrated in Fig. 4. For a set of decoder tokens " + }, + { + "bbox": [ + 313, + 641, + 554, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{G} \\in \\mathbb{R}^{B,T,N,C}" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 80, + 69, + 270, + 224 + ], + "blocks": [ + { + "bbox": [ + 80, + 69, + 270, + 224 + ], + "lines": [ + { + "bbox": [ + 80, + 69, + 270, + 224 + ], + "spans": [ + { + "bbox": [ + 80, + 69, + 270, + 224 + ], + "type": "image", + "image_path": "e2fe70253e4295dea812bf5a75af67c019b523b5a34fe2c4ad961025ba265b7c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 235, + 296, + 269 + ], + "lines": [ + { + "bbox": [ + 55, + 235, + 296, + 269 + ], + "spans": [ + { + "bbox": [ + 55, + 235, + 296, + 269 + ], + "type": "text", + "content": "Figure 4. Architecture of our temporal motion module. We insert a transformer-based motion module (in light yellow) into the vanilla DPT [33] head to enhance the temporal consistency." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 277, + 296, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 277, + 296, + 374 + ], + "spans": [ + { + "bbox": [ + 55, + 277, + 296, + 374 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 277, + 296, + 374 + ], + "type": "inline_equation", + "content": "B, T, N, C" + }, + { + "bbox": [ + 55, + 277, + 296, + 374 + ], + "type": "text", + "content": " represent the batch size, window length of a video sequence, token number, and token dimension, respectively. We merge the token number dimension into the batch axis and apply the motion module, which consists of two blocks of standard multi-head self-attention modules and feed-forward networks, along the temporal dimension " + }, + { + "bbox": [ + 55, + 277, + 296, + 374 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 277, + 296, + 374 + ], + "type": "text", + "content": ". To reduce the computation cost, the temporal motion modules are applied to low-resolution features." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 381, + 216, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 381, + 216, + 395 + ], + "spans": [ + { + "bbox": [ + 55, + 381, + 216, + 395 + ], + "type": "text", + "content": "3.5. 
Downstream Temporal Tasks" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 399, + 296, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 399, + 296, + 507 + ], + "spans": [ + { + "bbox": [ + 55, + 399, + 296, + 507 + ], + "type": "text", + "content": "Given a video sequence of " + }, + { + "bbox": [ + 55, + 399, + 296, + 507 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 399, + 296, + 507 + ], + "type": "text", + "content": " frames " + }, + { + "bbox": [ + 55, + 399, + 296, + 507 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^{t_1},\\mathbf{I}^{t_2},\\ldots ,\\mathbf{I}^{t_T}" + }, + { + "bbox": [ + 55, + 399, + 296, + 507 + ], + "type": "text", + "content": ", we construct a unique set of stereo image pairs for each task. As illustrated in Fig. 5, the flexible construction of input pairs—combined with the proposed temporal motion module and pointmap matching head—enables POMATO to seamlessly address downstream temporal tasks, including 3D point tracking, video depth estimation, and 3D reconstruction. The keyframe selection strategy and input formulation for each task are detailed in the following section." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "spans": [ + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "type": "text", + "content": "In addition to the default regression losses for " + }, + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_1" + }, + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "type": "text", + "content": ", and the predefined losses in Eq. 3 and Eq. 
4 for " + }, + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_3" + }, + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "type": "text", + "content": ", we further introduce a temporal consistency loss, " + }, + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{t}}" + }, + { + "bbox": [ + 55, + 555, + 295, + 603 + ], + "type": "text", + "content": ", which will be described in detail below." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "type": "text", + "content": "3D Point Tracking. As illustrated at the top of Fig. 5, the keyframe is set to the first image of the global video sequence and fed to the proposed " + }, + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_3" + }, + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "type": "text", + "content": " to obtain the pointmap matching result of each query point (initialized at the first image) under the coordinate system of each reference frame " + }, + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{X}_{m}^{t_{1},t_{1}},\\mathbf{X}_{m}^{t_{1},t_{2}},\\mathbf{X}_{m}^{t_{1},t_{3}},\\dots \\mathbf{X}_{m}^{t_{1},t_{T}}\\}" + }, + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "type": "text", + "content": ", while the set of reference frames " + }, + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}^{t_1},\\mathbf{I}^{t_2},\\mathbf{I}^{t_3},\\dots \\mathbf{I}^{t_T}\\}" + }, + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "type": "text", + "content": " is fed to " + }, + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_1" + }, + { + "bbox": [ + 55, + 603, + 296, + 714 + ], + "type": "text", + "content": " to obtain the pointmap under each ego coordinate system. The dense tracking results can be further sparsified by indexing the 2D coordinates. When performing inference on a video" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 318, + 72, + 555, + 322 + ], + "blocks": [ + { + "bbox": [ + 318, + 72, + 555, + 322 + ], + "lines": [ + { + "bbox": [ + 318, + 72, + 555, + 322 + ], + "spans": [ + { + "bbox": [ + 318, + 72, + 555, + 322 + ], + "type": "image", + "image_path": "8239ab20be4562800b2c68569d3d5ab2c42bca1f03dcdbe48b9eabdda64d41a0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "lines": [ + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "spans": [ + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "type": "text", + "content": "Figure 5. Inference pipelines for point tracking, video depth, and multi-view reconstruction. " + }, + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "type": "inline_equation", + "content": "t_k" + }, + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "type": "text", + "content": " indicates the keyframe. With the help of the motion module and flexible input construction, POMATO can be easily applied to downstream temporal tasks." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 384, + 555, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 384, + 555, + 430 + ], + "spans": [ + { + "bbox": [ + 313, + 384, + 555, + 430 + ], + "type": "text", + "content": "longer than " + }, + { + "bbox": [ + 313, + 384, + 555, + 430 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 384, + 555, + 430 + ], + "type": "text", + "content": " frames, a simple sliding-window approach with an overlap of four frames is adopted to enhance the consistency between adjacent video windows. The temporal consistency loss " + }, + { + "bbox": [ + 313, + 384, + 555, + 430 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{t}}" + }, + { + "bbox": [ + 313, + 384, + 555, + 430 + ], + "type": "text", + "content": " for tracking is:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 323, + 437, + 554, + 467 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 437, + 554, + 467 + ], + "spans": [ + { + "bbox": [ + 323, + 437, + 554, + 467 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {t}} = \\frac {1}{T} \\sum_ {i = 1} ^ {T} \\left\\| \\frac {\\mathbf {X} _ {m} ^ {t _ {1} , t _ {i}}}{z _ {m} ^ {T}} - \\frac {\\bar {\\mathbf {X}} _ {m} ^ {t _ {1} , t _ {i}}}{\\bar {z} _ {m} ^ {T}} \\right\\| + \\left\\| \\frac {\\mathbf {X} ^ {t _ {i} , t _ {i}}}{z ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\\bar {z} ^ {T}} \\right\\|, \\tag {6}", + "image_path": "77c2053e0882f10da0a9b7e380f417e6d9b57a1125876608e85426222f0b0d45.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "inline_equation", + "content": "z_{m}^{T} = \\mathrm{norm}\\left(\\mathbf{X}_{m}^{t_{1},t_{1}},\\mathbf{X}_{m}^{t_{1},t_{2}},\\dots,\\mathbf{X}_{m}^{t_{1},t_{T}}\\right)" + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "inline_equation", + "content": "\\bar{z}_{m}^{T} =" + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "text", + "content": " norm " + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "inline_equation", + "content": "(\\bar{\\mathbf{X}}_m^{t_1,t_1},\\bar{\\mathbf{X}}_m^{t_1,t_2},\\dots,\\bar{\\mathbf{X}}_m^{t_1,t_T})" + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "inline_equation", + "content": "z^{T}" + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "inline_equation", + "content": "\\bar{z}^{T}" + }, + { + "bbox": [ + 313, + 475, + 554, + 510 + ], + "type": "text", + "content": " are defined analogously." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "text", + "content": "Video Depth Estimation. 
As shown in the middle part of Fig. 5, the input video sequence is formulated as a set of identical image pairs " + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "inline_equation", + "content": "\\{(\\mathbf{I}^{t_1},\\mathbf{I}^{t_1}),(\\mathbf{I}^{t_2},\\mathbf{I}^{t_2}),\\dots,(\\mathbf{I}^{t_T},\\mathbf{I}^{t_T})\\}" + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "text", + "content": " and fed to " + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_1" + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "text", + "content": ", where the predictions from each head are identical: " + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{X}^{t_1,t_1},\\mathbf{X}^{t_2,t_2},\\dots,\\mathbf{X}^{t_T,t_T}\\}" + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "text", + "content": ". We use the output of " + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_1" + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "text", + "content": " as our final video depth estimation. The temporal consistency loss " + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{t}}" + }, + { + "bbox": [ + 313, + 510, + 554, + 588 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 324, + 596, + 555, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 596, + 555, + 625 + ], + "spans": [ + { + "bbox": [ + 324, + 596, + 555, + 625 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {t}} = \\frac {1}{T} \\sum_ {i = 1} ^ {T} \\left\\| \\frac {\\mathbf {X} _ {1} ^ {t _ {i} , t _ {i}}}{z _ {1} ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\\bar {z} ^ {T}} \\right\\| + \\left\\| \\frac {\\mathbf {X} _ {2} ^ {t _ {i} , t _ {i}}}{z _ {2} ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\\bar {z} ^ {T}} \\right\\|, \\tag {7}", + "image_path": "80953ea0dc5ca9ee59f95c9853c3873761fadebe7ab58f819e72258d3f71d946.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 633, + 554, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 633, + 554, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 633, + 554, + 658 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 633, + 554, + 658 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_1^{t_i,t_i}" + }, + { + "bbox": [ + 313, + 633, + 554, + 658 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 633, + 554, + 658 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_2^{t_i,t_i}" + }, + { + "bbox": [ + 313, + 633, + 554, + 658 + ], + "type": "text", + "content": " indicate the outputs from Head_1 and Head_2, respectively. " + }, + { + "bbox": [ + 313, + 633, + 554, + 658 + ], + "type": "inline_equation", + "content": "\\bar{\\mathbf{X}}^{t_i,t_i}" + }, + { + "bbox": [ + 313, + 633, + 554, + 658 + ], + "type": "text", + "content": " is the pointmap ground truth." 
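To make the normalization in Eq. 7 concrete, the sketch below scores one window of identical-pair predictions against the ground truth; the names, shapes, and the exact norm are assumptions for illustration (the paper defers the norm definition to DUSt3R).

```python
import numpy as np

def video_depth_consistency_loss(X1, X2, X_gt):
    """Sketch of Eq. 7 for (T, H, W, 3) pointmap stacks from Head_1, Head_2,
    and the ground truth. Each stack is scale-normalized by its own
    window-level factor before comparison, mirroring the z terms in Eq. 7."""
    def norm_factor(X):
        # Assumed DUSt3R-style normalization: mean point norm over the window.
        return np.mean(np.linalg.norm(X, axis=-1))
    z1, z2, z_gt = norm_factor(X1), norm_factor(X2), norm_factor(X_gt)
    term1 = np.linalg.norm(X1 / z1 - X_gt / z_gt, axis=-1).mean()
    term2 = np.linalg.norm(X2 / z2 - X_gt / z_gt, axis=-1).mean()
    return term1 + term2
```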
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 658, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 658, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 658, + 554, + 713 + ], + "type": "text", + "content": "3D Reconstruction. Assisted by the temporal motion module, redundant post-processing operations such as global alignment can be omitted, allowing the reconstructed 3D point cloud to be obtained in a feed-forward manner. As shown in the bottom part of Fig. 5, the keyframe is set to the last frame" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 81, + 70, + 526, + 218 + ], + "blocks": [ + { + "bbox": [ + 81, + 70, + 526, + 218 + ], + "lines": [ + { + "bbox": [ + 81, + 70, + 526, + 218 + ], + "spans": [ + { + "bbox": [ + 81, + 70, + 526, + 218 + ], + "type": "table", + "html": "
AlignmentMethodOptim. Onl.Sintel [4]BONN [30]KITTI [13]
Abs Rel ↓δ<1.25 ↑Abs Rel ↓δ<1.25 ↑Abs Rel ↓δ<1.25 ↑
Per-sequence scaleDUSt3R-GA [44]0.65645.20.15583.30.14481.3
MASt3R-GA [26]0.64143.90.25270.10.18374.5
MonST3R-GA [52]0.37855.80.06796.30.16874.4
Spann3R [41]0.62242.60.14481.30.19873.7
CUT3R [43]0.42147.90.07893.70.11888.1
POMATO0.41653.60.07496.10.08593.3
Per-sequence scale & shiftMonST3R-GA [52]0.33558.50.06396.40.10489.5
CUT3R [43]0.46656.20.11188.30.07594.3
POMATO0.34557.90.07296.50.08493.4
", + "image_path": "d664dc6c171f8e6a159f2659a62cc7b36112bf928cff2e86cd189897267afe7a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 222, + 555, + 257 + ], + "lines": [ + { + "bbox": [ + 55, + 222, + 555, + 257 + ], + "spans": [ + { + "bbox": [ + 55, + 222, + 555, + 257 + ], + "type": "text", + "content": "Table 1. Video depth evaluation. We report scale-invariant depth and scale & shift invariant depth accuracy on Sintel [4], Bonn [30], and KITTI [13] datasets. Methods requiring global alignment are marked “GA”, while “Optim.” and “Onl.” indicate optimization-based and online methods, respectively. The best and second best results in each category are bold and underlined, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "spans": [ + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "inline_equation", + "content": "\\mathbf{I}^{t_T}" + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "text", + "content": " within the temporal window of length " + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "text", + "content": " and is fed to " + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_1" + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "text", + "content": " with a set output of " + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{X}^{t_T,t_T},\\mathbf{X}^{t_T,t_T},\\dots,\\mathbf{X}^{t_T,t_T}\\}" + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "text", + "content": ". All the reference frames are input to the " + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "text", + "content": " so the target pointmaps " + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{X}^{t_1,t_T},\\mathbf{X}^{t_2,t_T},\\dots,\\mathbf{X}^{t_T,t_T}\\}" + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "text", + "content": " are aligned under the coordinate system of the keyframe. 
The temporal consistency loss " + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{t}}" + }, + { + "bbox": [ + 55, + 277, + 295, + 344 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 353, + 295, + 380 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 353, + 295, + 380 + ], + "spans": [ + { + "bbox": [ + 61, + 353, + 295, + 380 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {t}} = \\frac {1}{T} \\sum_ {i = 1} ^ {T} \\left\\| \\frac {\\mathbf {X} ^ {t _ {T} , t _ {T}}}{z _ {1} ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {T} , t _ {T}}}{\\bar {z} _ {1} ^ {T}} \\right\\| + \\left\\| \\frac {\\mathbf {X} ^ {t _ {i} , t _ {T}}}{z _ {2} ^ {T}} - \\frac {\\bar {\\mathbf {X}} ^ {t _ {i} , t _ {T}}}{\\bar {z} _ {2} ^ {T}} \\right\\| \\tag {8}", + "image_path": "f80df26c4822f115831822b26fcd2f96867473824b7b633de27b28e6f007792b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 388, + 296, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 388, + 296, + 449 + ], + "spans": [ + { + "bbox": [ + 55, + 388, + 296, + 449 + ], + "type": "text", + "content": "We further freeze the parameters in Decoder 1 and Decoder 2 when training on the temporal downstream tasks in the second stage. In our work, the temporal window length " + }, + { + "bbox": [ + 55, + 388, + 296, + 449 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 388, + 296, + 449 + ], + "type": "text", + "content": " is set to 12. Additional explorations on the temporal length can be found in Sec. 4." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 460, + 137, + 474 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 460, + 137, + 474 + ], + "spans": [ + { + "bbox": [ + 55, + 460, + 137, + 474 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 480, + 178, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 480, + 178, + 493 + ], + "spans": [ + { + "bbox": [ + 55, + 480, + 178, + 493 + ], + "type": "text", + "content": "4.1. Experimental Details" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 498, + 295, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 295, + 677 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 295, + 677 + ], + "type": "text", + "content": "Training data. We train our network with a mixture of five datasets: PointOdyssey [54], TartanAir [45], ParallelDomain4D [40], DynamicReplica [21], and Carla (0.9.15) [10]. The exact number of samples and the usage ratio of each dataset can be found in the supplementary materials. All datasets include pixel-accurate ground truth depth, as well as camera intrinsics and extrinsics, and encompass a wide variety of dynamic scenes across both indoor and outdoor environments. Among them, PointOdyssey and DynamicReplica have additional 2D trajectory annotations for dynamic objects, which can be used to construct pointmap matching ground truth following Eq. 2. 
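For illustration, the Eq. 2 ground truth can be assembled from such 2D tracks roughly as follows; the input layout and names are hypothetical, not the released data pipeline.

```python
import numpy as np

def build_matched_pointmap_gt(tracks_1, tracks_2, X11, visible):
    """Sketch of constructing the Eq. 2 ground truth from 2D tracks.
    tracks_1, tracks_2: (N, 2) integer (x, y) pixels of the same physical
    points in view 1 and view 2; X11: (H, W, 3) ground-truth pointmap of
    view 1 in its own frame; visible: (N,) visibility flags."""
    H, W, _ = X11.shape
    X_m_gt = np.zeros((H, W, 3), dtype=X11.dtype)
    supervised = np.zeros((H, W), dtype=bool)  # pixels that receive a label
    for (x1, y1), (x2, y2), ok in zip(tracks_1, tracks_2, visible):
        if ok:
            # Eq. 2: the matched pointmap at (x2, y2) is view 1's 3D point at (x1, y1).
            X_m_gt[y2, x2] = X11[y1, x1]
            supervised[y2, x2] = True
    return X_m_gt, supervised
```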
All datasets are used to supervise geometry learning on " + }, + { + "bbox": [ + 55, + 498, + 295, + 677 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_1" + }, + { + "bbox": [ + 55, + 498, + 295, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 498, + 295, + 677 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 55, + 498, + 295, + 677 + ], + "type": "text", + "content": ", while only PointOdyssey, DynamicReplica, and TartanAir are used to train the proposed pointmap matching head." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "content": "Training and inference details. Our model architecture is based on the publicly available DUSt3R [44] model, utilizing the same backbone consisting of a ViT-Large encoder" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 277, + 555, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 277, + 555, + 469 + ], + "spans": [ + { + "bbox": [ + 313, + 277, + 555, + 469 + ], + "type": "text", + "content": "and a ViT-Base decoder. To fully leverage MonST3R's geometry estimation capabilities in dynamic scenes, we initialize our model using the publicly available MonST3R checkpoint. For the newly introduced pointmap matching head, we initialize its weights from the pretrained " + }, + { + "bbox": [ + 313, + 277, + 555, + 469 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 313, + 277, + 555, + 469 + ], + "type": "text", + "content": " weights of MonST3R. The temporal motion module is initialized following [14]. We train our network for 10 epochs with a cosine learning rate schedule, with an initial learning rate of 1e-4. In the first stage, which involves pairwise training, we use a batch size of 16 on 4 A100 GPUs (40G). In the second stage, where the temporal motion module is introduced, the batch size is set to 4 with a fixed temporal window length of 12. During each training iteration, we randomly sample a downstream task (3D point tracking, video depth estimation, or 3D reconstruction) to construct the input pairs and apply the corresponding loss function." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 479, + 449, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 479, + 449, + 491 + ], + "spans": [ + { + "bbox": [ + 313, + 479, + 449, + 491 + ], + "type": "text", + "content": "4.2. Video Depth Estimation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "type": "text", + "content": "Following MonST3R [52] and CUT3R [43], we rescale all predictions from the same video to align them together by conducting two forms of alignment: per-sequence scale-and-shift alignment and per-sequence scale alignment. Thus, we can measure the per-frame depth quality and inter-frame depth consistency. We employ our proposed motion module for video depth estimation in a feed-forward manner as described in Sec. 3.5 and compare our method against several variants of DUSt3R, including DUSt3R [44], MASt3R [26], MonST3R [52], Spann3R [41], and CUT3R [43]. 
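The two alignment protocols reduce to one- and two-parameter least-squares fits per sequence; the sketch below is our reading of the protocol under assumed flattened depth arrays, not the authors' evaluation code.

```python
import numpy as np

def align_sequence(pred, gt, use_shift=False):
    """Per-sequence scale (and optional shift) alignment for video depth
    evaluation. pred, gt: 1D arrays of all depth values from one video."""
    if use_shift:
        A = np.stack([pred, np.ones_like(pred)], axis=1)  # design matrix (N, 2)
        (s, t), *_ = np.linalg.lstsq(A, gt, rcond=None)   # least-squares scale and shift
        return s * pred + t
    s = np.dot(pred, gt) / np.dot(pred, pred)             # closed-form scale only
    return s * pred
```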
Given 6 frames of " + }, + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "type": "inline_equation", + "content": "288 \\times 512" + }, + { + "bbox": [ + 313, + 498, + 555, + 713 + ], + "type": "text", + "content": " resolution on an NVIDIA 4070 GPU, POMATO reconstructs the 3D point cloud in 0.7 seconds, whereas global alignment-based methods such as MonST3R require 5.8 seconds. As shown in Tab. 1, our method demonstrates comparable performance to the global alignment (GA)-based MonST3R [52] on the Sintel [4] and BONN [30] datasets, while surpassing it on the KITTI dataset. Besides, we" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 126, + 74, + 482, + 160 + ], + "blocks": [ + { + "bbox": [ + 126, + 74, + 482, + 160 + ], + "lines": [ + { + "bbox": [ + 126, + 74, + 482, + 160 + ], + "spans": [ + { + "bbox": [ + 126, + 74, + 482, + 160 + ], + "type": "table", + "html": "
MethodPointOdyssey [54]ADT [31]PStudio [20]Average
L-12L-24L-12L-24L-12L-24L-12L-24
SpatialTracker* [47]20.4620.7121.6420.6730.4125.8724.1722.42
DUSt3R [44]19.0319.0329.0225.559.726.5019.2617.03
MASt3R [26]16.5817.3527.3626.4611.788.0918.5717.30
MonST3R [52]27.3127.9228.3026.1316.5011.0624.0321.70
POMATO33.2033.5831.5728.2224.5919.7929.7927.20
", + "image_path": "f5f28d1c790be8bf149f762f29cd56a6fb328dbe358471623ca58d55907612bc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 165, + 555, + 187 + ], + "lines": [ + { + "bbox": [ + 55, + 165, + 555, + 187 + ], + "spans": [ + { + "bbox": [ + 55, + 165, + 555, + 187 + ], + "type": "text", + "content": "Table 2. 3D tracking evaluation. We report the APD metric to evaluate 3D point tracking on the PointOdyssey [54], ADT [31], and PStudio [20] datasets. L-12 and L-24 indicate tracking within the temporal length of 12 frames and 24 frames, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 69, + 198, + 544, + 397 + ], + "blocks": [ + { + "bbox": [ + 69, + 198, + 544, + 397 + ], + "lines": [ + { + "bbox": [ + 69, + 198, + 544, + 397 + ], + "spans": [ + { + "bbox": [ + 69, + 198, + 544, + 397 + ], + "type": "image", + "image_path": "ffa2a8e80de9dcbc736d37d3eb5d1989e36111cb092a54c8fa4c690e19d35a0a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 406, + 555, + 429 + ], + "lines": [ + { + "bbox": [ + 55, + 406, + 555, + 429 + ], + "spans": [ + { + "bbox": [ + 55, + 406, + 555, + 429 + ], + "type": "text", + "content": "Figure 6. Qualitative comparison of dynamic scenes. Compared to MonST3R, our POMATO can provide more reliable motion masks, 3D point tracking, and reconstruction performance." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 449, + 295, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 449, + 295, + 510 + ], + "spans": [ + { + "bbox": [ + 54, + 449, + 295, + 510 + ], + "type": "text", + "content": "consistently outperform the state-of-the-art online method, CUT3R [43], across various settings. These results underscore the effectiveness of our approach, specifically (1) the joint learning of geometry and pointmap matching, and (2) the temporal motion module." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 516, + 165, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 516, + 165, + 529 + ], + "spans": [ + { + "bbox": [ + 55, + 516, + 165, + 529 + ], + "type": "text", + "content": "4.3. 3D Point Tracking" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 534, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 534, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 534, + 296, + 713 + ], + "type": "text", + "content": "For 3D point tracking task, we use the Aria Digital Twin (ADT) [31], and Panoptic Studio (PStudio) [20] benchmarks from the TAPVid-3D [25] dataset along with the validation set on the PointOdyssey [54] dataset. We report the Average Percent Deviation (APD) metric, which quantifies the average percentage of points within a threshold relative to the ground truth depth. The APD metric serves as a direct measure of the accuracy of the predicted tracking. We reformulate the datasets and project all the query points within a temporal window to the first frame. We report tracking results on the length of 12 and 24 frames. As shown in Tab.2, our POMATO achieves the best performance on both PointOdyssey and ADT datasets. 
It is worth mentioning that SpatialTracker [47] is a state-of-the-art network tailored for 3D point tracking with ground truth camera intrinsics as ad" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 449, + 555, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 555, + 535 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 555, + 535 + ], + "type": "text", + "content": "ditional input data. POMATO surpasses it on two datasets and improves the average APD metric by " + }, + { + "bbox": [ + 313, + 449, + 555, + 535 + ], + "type": "inline_equation", + "content": "23.3\\%" + }, + { + "bbox": [ + 313, + 449, + 555, + 535 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 449, + 555, + 535 + ], + "type": "inline_equation", + "content": "21.4\\%" + }, + { + "bbox": [ + 313, + 449, + 555, + 535 + ], + "type": "text", + "content": " for 12 frames and 24 frames, respectively. For DUSt3R-based methods, we use the output of " + }, + { + "bbox": [ + 313, + 449, + 555, + 535 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 313, + 449, + 555, + 535 + ], + "type": "text", + "content": " as tracking results. Clearly, the ambiguous matching representation limits their capability to handle this fine-grained 3D reconstruction task in dynamic scenes." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 540, + 453, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 453, + 552 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 453, + 552 + ], + "type": "text", + "content": "4.4. Camera Pose Estimation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 557, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 557, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 557, + 555, + 713 + ], + "type": "text", + "content": "Following DUSt3R-based methods, we perform global alignment with the model trained in the first stage on the Bonn [30] and TUM [37] datasets. The sampling stride is set to 5 for the Bonn dataset and 3 for the TUM dataset. Compared with the optical-flow-assisted global alignment in MonST3R, the dynamic mask is computed according to Eq. 5, while the 2D pseudo labels are replaced by projecting the pointmap matching results to 2D coordinates with the estimated camera intrinsics. Absolute Translation Error (ATE), Relative Translation Error (RPE trans), and Relative Rotation Error (RPE rot) are reported. The evaluation results over 40 frames are reported in Tab. 4. Notably, POMATO obtains an overall state-of-the-art performance and signifi" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 70, + 504, + 148 + ], + "blocks": [ + { + "bbox": [ + 107, + 70, + 504, + 148 + ], + "lines": [ + { + "bbox": [ + 107, + 70, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 107, + 70, + 504, + 148 + ], + "type": "table", + "html": "
Temporal LengthVideo DepthTracking (12 Frames)
Sintel [4]Bonn [30]KITTI [13]PointOdyssey [54] ADT [31] PStudio [20]
Abs Rel ↓ δ<1.25 ↑Abs Rel ↓ δ<1.25 ↑Abs Rel ↓ δ<1.25 ↑APD↑APD↑APD↑
Pair-wise0.54846.20.08794.00.11389.532.0629.8723.10
6 frames0.43651.30.07695.90.08593.532.6930.9324.52
12 frames0.41653.60.07596.10.08693.333.2031.5724.59
", + "image_path": "48af71f3300680b8034c43f62dca0f4717a244f56bdddfab423db451007292d7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 64, + 198, + 288, + 267 + ], + "blocks": [ + { + "bbox": [ + 55, + 156, + 555, + 179 + ], + "lines": [ + { + "bbox": [ + 55, + 156, + 555, + 179 + ], + "spans": [ + { + "bbox": [ + 55, + 156, + 555, + 179 + ], + "type": "text", + "content": "Table 3. Ablation study on the temporal motion module. The introduction of the temporal motion module brings a significant improvement. As the temporal window length enlarges from 6 frames to 12 frames, we obtain an overall consistent improvement." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 64, + 198, + 288, + 267 + ], + "lines": [ + { + "bbox": [ + 64, + 198, + 288, + 267 + ], + "spans": [ + { + "bbox": [ + 64, + 198, + 288, + 267 + ], + "type": "table", + "html": "
MethodTUM [37]Bonn [30]
ATE ↓RPE trans ↓RPE rot ↓ATE ↓RPE trans ↓RPE rot ↓
DUSt3R [44]0.0250.0132.3610.0300.0252.522
MASt3R [26]0.0270.0151.9100.0310.0252.478
MonST3R [52]0.0210.0061.1420.0250.0212.120
CUT3R [43]0.0230.0160.5100.0280.0332.569
POMATO0.0200.0100.5090.0370.0161.782
", + "image_path": "d5e4b9b2b316a0891838b1d256865499a344168e46176581f1098b0fa64d8c42.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 63, + 310, + 135, + 352 + ], + "blocks": [ + { + "bbox": [ + 63, + 310, + 135, + 352 + ], + "lines": [ + { + "bbox": [ + 63, + 310, + 135, + 352 + ], + "spans": [ + { + "bbox": [ + 63, + 310, + 135, + 352 + ], + "type": "image", + "image_path": "48fc00ed0ccff1357aa20e08011e8b828d049473515dabed56bb834c4997282a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 63, + 353, + 134, + 394 + ], + "blocks": [ + { + "bbox": [ + 63, + 353, + 134, + 394 + ], + "lines": [ + { + "bbox": [ + 63, + 353, + 134, + 394 + ], + "spans": [ + { + "bbox": [ + 63, + 353, + 134, + 394 + ], + "type": "image", + "image_path": "98a876741c25930c1c2f4bfce1d1c178379109f82b67d15153cad3df741cec13.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 63, + 396, + 134, + 437 + ], + "blocks": [ + { + "bbox": [ + 63, + 396, + 134, + 437 + ], + "lines": [ + { + "bbox": [ + 63, + 396, + 134, + 437 + ], + "spans": [ + { + "bbox": [ + 63, + 396, + 134, + 437 + ], + "type": "image", + "image_path": "c01e277c9d6d6d62313b376a0e50c866e3c5aa3c837fecebdb575154bd8f9543.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 78, + 437, + 120, + 447 + ], + "lines": [ + { + "bbox": [ + 78, + 437, + 120, + 447 + ], + "spans": [ + { + "bbox": [ + 78, + 437, + 120, + 447 + ], + "type": "text", + "content": "Input Images" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 458, + 295, + 492 + ], + "lines": [ + { + "bbox": [ + 55, + 458, + 295, + 492 + ], + "spans": [ + { + "bbox": [ + 55, + 458, + 295, + 492 + ], + "type": "text", + "content": "Figure 7. Effectiveness of our pointmap matching head. Without explicitly filtering out the motion area, both pose and geometry estimation will be degraded." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 136, + 311, + 288, + 364 + ], + "blocks": [ + { + "bbox": [ + 136, + 311, + 288, + 364 + ], + "lines": [ + { + "bbox": [ + 136, + 311, + 288, + 364 + ], + "spans": [ + { + "bbox": [ + 136, + 311, + 288, + 364 + ], + "type": "image", + "image_path": "abf58e9ec06158973e58dbe4a60955efcc728a923015cf9b8a45e5acad73ffcb.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 136, + 364, + 288, + 433 + ], + "blocks": [ + { + "bbox": [ + 136, + 364, + 288, + 433 + ], + "lines": [ + { + "bbox": [ + 136, + 364, + 288, + 433 + ], + "spans": [ + { + "bbox": [ + 136, + 364, + 288, + 433 + ], + "type": "image", + "image_path": "a00bf8bcacaf47797f494012a320e2a40fdd47220aa6b3eb0c87cdcafa2a834a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 151, + 434, + 276, + 442 + ], + "lines": [ + { + "bbox": [ + 151, + 434, + 276, + 442 + ], + "spans": [ + { + "bbox": [ + 151, + 434, + 276, + 442 + ], + "type": "text", + "content": "3D Reconstruction with our Pointmap Matching." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 64, + 505, + 287, + 548 + ], + "blocks": [ + { + "bbox": [ + 55, + 275, + 295, + 299 + ], + "lines": [ + { + "bbox": [ + 55, + 275, + 295, + 299 + ], + "spans": [ + { + "bbox": [ + 55, + 275, + 295, + 299 + ], + "type": "text", + "content": "Table 4. Pose estimation. Our method achieves the best overall performance and improves the RPE rot metric significantly." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 64, + 505, + 287, + 548 + ], + "lines": [ + { + "bbox": [ + 64, + 505, + 287, + 548 + ], + "spans": [ + { + "bbox": [ + 64, + 505, + 287, + 548 + ], + "type": "table", + "html": "
MethodBonn [30]PointOdyssey [54]ADT [31]PStudio [20]
ATE ↓RPE trans ↓RPE rot ↓APD ↑APD ↑APD ↑
W/O Head30.0400.0151.72129.1029.6216.94
W/ Head30.0370.0161.78232.0629.8723.10
", + "image_path": "80719c660669e2d285012f6663ec105a6137bf97e42c4107de2e9c454bfec2f8.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 556, + 295, + 590 + ], + "lines": [ + { + "bbox": [ + 55, + 556, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 55, + 556, + 295, + 590 + ], + "type": "text", + "content": "Table 5. Ablation study on the effectiveness of the pointmap matching head. The comparisons are reported on the pose estimation and 3D point tracking tasks." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 613, + 295, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 613, + 295, + 637 + ], + "spans": [ + { + "bbox": [ + 55, + 613, + 295, + 637 + ], + "type": "text", + "content": "cantly improves the RPE-rot metric, surpassing MonST3R by " + }, + { + "bbox": [ + 55, + 613, + 295, + 637 + ], + "type": "inline_equation", + "content": "55.4\\%" + }, + { + "bbox": [ + 55, + 613, + 295, + 637 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 613, + 295, + 637 + ], + "type": "inline_equation", + "content": "13.3\\%" + }, + { + "bbox": [ + 55, + 613, + 295, + 637 + ], + "type": "text", + "content": " on the TUM and Bonn datasets." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 647, + 149, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 647, + 149, + 659 + ], + "spans": [ + { + "bbox": [ + 55, + 647, + 149, + 659 + ], + "type": "text", + "content": "4.5. Ablation Study" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "type": "text", + "content": "We conduct extensive ablation studies to evaluate the effectiveness of the temporal motion module and the proposed pointmap matching head. As shown in Table 3, we report results for three models: one trained with only pairwise" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 200, + 555, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 200, + 555, + 464 + ], + "spans": [ + { + "bbox": [ + 313, + 200, + 555, + 464 + ], + "type": "text", + "content": "images (first-stage training), one using a shorter temporal window of 6 frames, and another using the default temporal window length of 12 frames. Incorporating temporal consistency yields substantial improvements across all datasets for video depth estimation and 3D point tracking. Further improvement is achieved when the temporal window length increases from 6 frames to 12 frames. In Table 5, we evaluate the effectiveness of the pointmap matching head. While it introduces only a modest improvement in the ATE metric, we attribute this to the limited motion and minimal viewpoint variation in the indoor evaluation dataset. As illustrated in Fig. 7, under challenging in-the-wild conditions with significant motion and rapid viewpoint changes, removing the pointmap matching head introduces ambiguity in explicit rigid transformation estimation, resulting in a clear degradation in performance. To further demonstrate the impact of the pointmap matching head on 3D point tracking, we conduct tracking experiments over 12 frames using the pairwise input setup. 
Clearly, removing the pointmap matching head (using only " + }, + { + "bbox": [ + 313, + 200, + 555, + 464 + ], + "type": "inline_equation", + "content": "\\mathrm{Head}_2" + }, + { + "bbox": [ + 313, + 200, + 555, + 464 + ], + "type": "text", + "content": ") leads to an inevitable performance drop, emphasizing explicit correspondence modeling for reliable long-term tracking." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 474, + 466, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 474, + 466, + 487 + ], + "spans": [ + { + "bbox": [ + 313, + 474, + 466, + 487 + ], + "type": "text", + "content": "5. Discussion and Conclusion" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 494, + 555, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 494, + 555, + 673 + ], + "spans": [ + { + "bbox": [ + 313, + 494, + 555, + 673 + ], + "type": "text", + "content": "We introduce POMATO, a unified framework for geometry estimation and motion understanding in dynamic scenes. By leveraging the proposed pointmap matching head, our method effectively distinguishes moving regions, thereby mitigating the interference introduced by dynamic objects. The temporal motion module further facilitates the learning of temporal dynamics across frames, enhancing scale consistency and improving performance in tasks where both geometry and matching are critical—most notably, 3D point tracking. The downstream temporal tasks including 3D point tracking, video depth estimation, and 3D reconstruction can be easily applied in a feed-forward manner. In future work, we plan to scale up training with more dynamic reconstruction and matching datasets to further enhance 3D reconstruction and tracking performance." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 673, + 554, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 673, + 554, + 709 + ], + "spans": [ + { + "bbox": [ + 313, + 673, + 554, + 709 + ], + "type": "text", + "content": "Acknowledgement. This work was supported by the National Natural Science Foundation of China (No. 62206244)" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 91, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 61, + 91, + 296, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 91, + 296, + 134 + ], + "spans": [ + { + "bbox": [ + 61, + 91, + 296, + 134 + ], + "type": "text", + "content": "[1] Daniel Barath, Dmytro Mishkin, Luca Cavalli, Paul-Edouard Sarlin, Petr Hruby, and Marc Pollefeys. Affineglue: Joint matching and robust estimation. arXiv preprint arXiv:2307.15381, 2023. 
3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 135, + 296, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 135, + 296, + 200 + ], + "spans": [ + { + "bbox": [ + 61, + 135, + 296, + 200 + ], + "type": "text", + "content": "[2] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, Varun Jampani, and Robin Rombach. Stable video diffusion: Scaling latent video diffusion models to large datasets. abs/2311.15127, 2023. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 202, + 295, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 202, + 295, + 245 + ], + "spans": [ + { + "bbox": [ + 62, + 202, + 295, + 245 + ], + "type": "text", + "content": "[3] Aleksei Bochkovskii, Amael Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R. Richter, and Vladlen Koltun. Depth pro: Sharp monocular metric depth in less than a second. arXiv, 2024. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 247, + 295, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 247, + 295, + 289 + ], + "spans": [ + { + "bbox": [ + 62, + 247, + 295, + 289 + ], + "type": "text", + "content": "[4] Daniel J. Butler, Jonas Wulff, Garrett B. Stanley, and Michael J. Black. A naturalistic open source movie for optical flow evaluation. In ECCV, pages 611-625, 2012. 6, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 291, + 295, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 291, + 295, + 312 + ], + "spans": [ + { + "bbox": [ + 62, + 291, + 295, + 312 + ], + "type": "text", + "content": "[5] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. CVPR, 2023. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 314, + 295, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 314, + 295, + 357 + ], + "spans": [ + { + "bbox": [ + 62, + 314, + 295, + 357 + ], + "type": "text", + "content": "[6] Sili Chen, Hengkai Guo, Shengnan Zhu, Feihu Zhang, Zi long Huang, Jiashi Feng, and Bingyi Kang. Video depth anything: Consistent depth estimation for super-long videos. arXiv preprint arXiv:2501.12375, 2025. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 358, + 295, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 358, + 295, + 379 + ], + "spans": [ + { + "bbox": [ + 62, + 358, + 295, + 379 + ], + "type": "text", + "content": "[7] Yu Chen, Yisong Chen, and Guoping Wang. Bundle adjustment revisited. arXiv preprint arXiv: 1912.03858, 2019. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 380, + 295, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 380, + 295, + 434 + ], + "spans": [ + { + "bbox": [ + 62, + 380, + 295, + 434 + ], + "type": "text", + "content": "[8] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 224-236, 2018. 
3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 436, + 295, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 436, + 295, + 501 + ], + "spans": [ + { + "bbox": [ + 62, + 436, + 295, + 501 + ], + "type": "text", + "content": "[9] Carl Doersch, Yi Yang, Mel Vecerik, Dilara Gokay, Ankush Gupta, Yusuf Aytar, Joao Carreira, and Andrew Zisserman. Tapir: Tracking any point with per-frame initialization and temporal refinement. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10061-10072, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 502, + 295, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 502, + 295, + 545 + ], + "spans": [ + { + "bbox": [ + 57, + 502, + 295, + 545 + ], + "type": "text", + "content": "[10] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. Carla: An open urban driving simulator. In Conference on robot learning, pages 1-16. PMLR, 2017. 6, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 547, + 295, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 547, + 295, + 600 + ], + "spans": [ + { + "bbox": [ + 57, + 547, + 295, + 600 + ], + "type": "text", + "content": "[11] Xiao Fu, Wei Yin, Mu Hu, Kaixuan Wang, Yuexin Ma, Ping Tan, Shaojie Shen, Dahua Lin, and Xiaoxiao Long. Geowizard: Unleashing the diffusion priors for 3d geometry estimation from a single image. arXiv preprint arXiv: 2403.12013, 2024. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 602, + 295, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 602, + 295, + 635 + ], + "spans": [ + { + "bbox": [ + 57, + 602, + 295, + 635 + ], + "type": "text", + "content": "[12] Yasutaka Furukawa, Carlos Hernández, et al. Multi-view stereo: A tutorial. Foundations and Trends® in Computer Graphics and Vision, 9(1-2):1-148, 2015. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 636, + 295, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 636, + 295, + 668 + ], + "spans": [ + { + "bbox": [ + 57, + 636, + 295, + 668 + ], + "type": "text", + "content": "[13] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In CVPR, pages 3354-3361, 2012. 6, 8" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 670, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 670, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 670, + 295, + 713 + ], + "type": "text", + "content": "[14] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff:Animate your personalized text-to-image diffusion models without specific tuning, 2023.6" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "type": "text", + "content": "[15] Adam W. Harley, Zhaoyuan Fang, and Katerina Fragkiadaki. Particle video revisited: Tracking through occlusions using point trajectories. In ECCV, pages 59-75. Springer, 2022. 
3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 107, + 553, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 107, + 553, + 171 + ], + "spans": [ + { + "bbox": [ + 317, + 107, + 553, + 171 + ], + "type": "text", + "content": "[16] Mu Hu, Wei Yin, Chi Zhang, Zhipeng Cai, Xiaoxiao Long, Hao Chen, Kaixuan Wang, Gang Yu, Chunhua Shen, and Shaojie Shen. Metric3d v2: A versatile monocular geometric foundation model for zero-shot metric depth and surface normal estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 173, + 553, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 173, + 553, + 215 + ], + "spans": [ + { + "bbox": [ + 317, + 173, + 553, + 215 + ], + "type": "text", + "content": "[17] Wenbo Hu, Xiangjun Gao, Xiaoyu Li, Sijie Zhao, Xiaodong Cun, Yong Zhang, Long Quan, and Ying Shan. Depthcrafter: Generating consistent long depth sequences for open-world videos. In CVPR, 2025. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 217, + 553, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 217, + 553, + 270 + ], + "spans": [ + { + "bbox": [ + 316, + 217, + 553, + 270 + ], + "type": "text", + "content": "[18] Mustafa Işık, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 272, + 553, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 272, + 553, + 315 + ], + "spans": [ + { + "bbox": [ + 316, + 272, + 553, + 315 + ], + "type": "text", + "content": "[19] Muhammad Zubair Irshad, Mauro Comi, Yen-Chen Lin, Nick Heppert, Abhinav Valada, Rares Ambrus, Zsolt Kira, and Jonathan Tremblay. Neural fields in robotics: A survey. arXiv preprint arXiv: 2410.20220, 2024. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 316, + 553, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 316, + 553, + 381 + ], + "spans": [ + { + "bbox": [ + 317, + 316, + 553, + 381 + ], + "type": "text", + "content": "[20] Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Scott Godisart, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social interaction capture. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017. 7, 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 383, + 553, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 383, + 553, + 425 + ], + "spans": [ + { + "bbox": [ + 316, + 383, + 553, + 425 + ], + "type": "text", + "content": "[21] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Dynamic stereo: Consistent dynamic depth from stereo videos. CVPR, 2023. 6, 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 426, + 553, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 426, + 553, + 468 + ], + "spans": [ + { + "bbox": [ + 316, + 426, + 553, + 468 + ], + "type": "text", + "content": "[22] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. 
Co-tracker: It is better to track together. In Proc. ECCV, 2024. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 471, + 553, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 471, + 553, + 534 + ], + "spans": [ + { + "bbox": [ + 317, + 471, + 553, + 534 + ], + "type": "text", + "content": "[23] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 536, + 553, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 536, + 553, + 569 + ], + "spans": [ + { + "bbox": [ + 316, + 536, + 553, + 569 + ], + "type": "text", + "content": "[24] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM TOG, 42(4):139-1, 2023. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 571, + 553, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 571, + 553, + 613 + ], + "spans": [ + { + "bbox": [ + 317, + 571, + 553, + 613 + ], + "type": "text", + "content": "[25] Skanda Koppula, Ignacio Rocco, Yi Yang, Joe Heyward, João Carreira, Andrew Zisserman, Gabriel Brostow, and Carl Doersch. Tapvid-3d: A benchmark for tracking any point in 3d. arXiv preprint arXiv: 2407.05921, 2024. 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 615, + 553, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 615, + 553, + 646 + ], + "spans": [ + { + "bbox": [ + 316, + 615, + 553, + 646 + ], + "type": "text", + "content": "[26] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. European Conference on Computer Vision, 2024. 2, 6, 7, 8" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 647, + 553, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 647, + 553, + 690 + ], + "spans": [ + { + "bbox": [ + 316, + 647, + 553, + 690 + ], + "type": "text", + "content": "[27] Philipp Lindenberger, Paul-Edouard Sarlin, and Marc Pollefeys. Lightglue: Local feature matching at light speed. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17627-17638, 2023. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 692, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 692, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 692, + 553, + 713 + ], + "type": "text", + "content": "[28] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf:" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 295, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 76, + 73, + 294, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 73, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 76, + 73, + 294, + 95 + ], + "type": "text", + "content": "Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 
3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 295, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 295, + 140 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 295, + 140 + ], + "type": "text", + "content": "[29] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, and Marc Szafraniec et al. DINOv2: Learning robust visual features without supervision. Trans. Mach. Learn. Research, 2024. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 141, + 294, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 141, + 294, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 141, + 294, + 206 + ], + "type": "text", + "content": "[30] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguere, and Cyril Stachniss. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 7855-7862. IEEE, 2019. 6, 7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 209, + 294, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 209, + 294, + 262 + ], + "spans": [ + { + "bbox": [ + 56, + 209, + 294, + 262 + ], + "type": "text", + "content": "[31] Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Carl Yuheng Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. arXiv preprint arXiv: 2306.06362, 2023. 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 264, + 294, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 264, + 294, + 318 + ], + "spans": [ + { + "bbox": [ + 56, + 264, + 294, + 318 + ], + "type": "text", + "content": "[32] Rene Ranftl, Vibhav Vineet, Qifeng Chen, and Vladlen Koltun. Dense monocular depth estimation in complex dynamic scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4058-4066, 2016. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 320, + 295, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 320, + 295, + 365 + ], + "spans": [ + { + "bbox": [ + 56, + 320, + 295, + 365 + ], + "type": "text", + "content": "[33] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF international conference on computer vision, pages 12179-12188, 2021. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 365, + 294, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 365, + 294, + 409 + ], + "spans": [ + { + "bbox": [ + 56, + 365, + 294, + 409 + ], + "type": "text", + "content": "[34] Chris Russell, Rui Yu, and Lourdes Agapito. Video popuup: Monocular 3d reconstruction of dynamic scenes. In European conference on computer vision, pages 583-598. Springer, 2014. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 411, + 294, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 411, + 294, + 443 + ], + "spans": [ + { + "bbox": [ + 56, + 411, + 294, + 443 + ], + "type": "text", + "content": "[35] Peter Sand and Seth Teller. Particle video: Long-range motion estimation using point trajectories. International journal of computer vision, 80:72-91, 2008. 
3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 445, + 294, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 445, + 294, + 487 + ], + "spans": [ + { + "bbox": [ + 56, + 445, + 294, + 487 + ], + "type": "text", + "content": "[36] Jiahao Shao, Yuanbo Yang, Hongyu Zhou, Youmin Zhang, Yujun Shen, Matteo Poggi, and Yiyi Liao. Learning temporally consistent video depth from video diffusion priors. abs/2406.01493, 2024. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 490, + 294, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 490, + 294, + 532 + ], + "spans": [ + { + "bbox": [ + 56, + 490, + 294, + 532 + ], + "type": "text", + "content": "[37] Jürgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. A benchmark for the evaluation of RGB-D SLAM systems. pages 573-580, 2012. 7, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 535, + 294, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 535, + 294, + 588 + ], + "spans": [ + { + "bbox": [ + 56, + 535, + 294, + 588 + ], + "type": "text", + "content": "[38] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part II 16, pages 402–419. Springer, 2020. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 590, + 294, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 590, + 294, + 624 + ], + "spans": [ + { + "bbox": [ + 56, + 590, + 294, + 624 + ], + "type": "text", + "content": "[39] Zachary Teed and Jia Deng. Droid-slam: Deep visual slam for monocular, stereo, and rgb-d cameras. Neural Information Processing Systems, 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 624, + 294, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 624, + 294, + 678 + ], + "spans": [ + { + "bbox": [ + 56, + 624, + 294, + 678 + ], + "type": "text", + "content": "[40] Basile Van Hoorick, Rundi Wu, Ege Ozguroglu, Kyle Sargent, Ruoshi Liu, Pavel Tokmakov, Achal Dave, Changxi Zheng, and Carl Vondrick. Generative camera dolly: Extreme monocular dynamic novel view synthesis. arXiv preprint arXiv:2405.14868, 2024. 6, 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 680, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 680, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 680, + 294, + 712 + ], + "type": "text", + "content": "[41] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024. 2, 6" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 654 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 116 + ], + "type": "text", + "content": "[42] Qianqian Wang, Vickie Ye, Hang Gao, Jake Austin, Zhengqi Li, and Angjoo Kanazawa. Shape of motion: 4d reconstruction from a single video. arXiv preprint arXiv:2407.13764, 2024. 
2, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 118, + 553, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 118, + 553, + 151 + ], + "spans": [ + { + "bbox": [ + 316, + 118, + 553, + 151 + ], + "type": "text", + "content": "[43] Qianqian Wang, Yifei Zhang, Aleksander Holynski, Alexei A. Efros, and Angjoo Kanazawa. Continuous 3d perception model with persistent state, 2025. 2, 6, 7, 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 152, + 553, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 152, + 553, + 195 + ], + "spans": [ + { + "bbox": [ + 316, + 152, + 553, + 195 + ], + "type": "text", + "content": "[44] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. DUSt3R: Geometric 3D vision made easy. In CVPR, pages 20697-20709, 2024. 2, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 197, + 553, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 197, + 553, + 240 + ], + "spans": [ + { + "bbox": [ + 316, + 197, + 553, + 240 + ], + "type": "text", + "content": "[45] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian Scherer. TartanAir: A dataset to push the limits of visual SLAM. pages 4909-4916, 2020. 6, 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 241, + 553, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 241, + 553, + 274 + ], + "spans": [ + { + "bbox": [ + 316, + 241, + 553, + 274 + ], + "type": "text", + "content": "[46] Yihan Wang, Lahav Lipson, and Jia Deng. SEA-RAFT: Simple, efficient, accurate RAFT for optical flow. In ECCV, 2024. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 275, + 553, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 275, + 553, + 330 + ], + "spans": [ + { + "bbox": [ + 316, + 275, + 553, + 330 + ], + "type": "text", + "content": "[47] Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. Spatialtracker: Tracking any 2d pixels in 3d space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2, 3, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 331, + 553, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 331, + 553, + 374 + ], + "spans": [ + { + "bbox": [ + 316, + 331, + 553, + 374 + ], + "type": "text", + "content": "[48] Guangkai Xu, Yongtao Ge, Mingyu Liu, Chengxiang Fan, Kangyang Xie, Zhiyue Zhao, Hao Chen, and Chunhua Shen. Diffusion models trained with large data are transferable visual models. arXiv preprint arXiv: 2403.06090, 2024. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 376, + 553, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 376, + 553, + 419 + ], + "spans": [ + { + "bbox": [ + 316, + 376, + 553, + 419 + ], + "type": "text", + "content": "[49] Yueming Xu, Haochen Jiang, Zhongyang Xiao, Jianfeng Feng, and Li Zhang. Dg-slam: Robust dynamic gaussian splatting slam with hybrid pose optimization. arXiv preprint arXiv: 2411.08373, 2024. 
2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 421, + 553, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 421, + 553, + 464 + ], + "spans": [ + { + "bbox": [ + 316, + 421, + 553, + 464 + ], + "type": "text", + "content": "[50] Honghui Yang, Di Huang, Wei Yin, Chunhua Shen, Haifeng Liu, Xiaofei He, Binbin Lin, Wanli Ouyang, and Tong He. Depth any video with scalable synthetic data. arXiv preprint arXiv:2410.10815, 2024. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 466, + 553, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 466, + 553, + 498 + ], + "spans": [ + { + "bbox": [ + 316, + 466, + 553, + 498 + ], + "type": "text", + "content": "[51] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. arXiv:2406.09414, 2024. 2, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 500, + 553, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 500, + 553, + 553 + ], + "spans": [ + { + "bbox": [ + 316, + 500, + 553, + 553 + ], + "type": "text", + "content": "[52] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. Monst3r: A simple approach for estimating geometry in the presence of motion. arXiv preprint arxiv:2410.03825, 2024. 2, 3, 6, 7, 8, 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 555, + 553, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 555, + 553, + 609 + ], + "spans": [ + { + "bbox": [ + 316, + 555, + 553, + 609 + ], + "type": "text", + "content": "[53] Guosheng Zhao, Chaojun Ni, Xiaofeng Wang, Zheng Zhu, Xueyang Zhang, Yida Wang, Guan Huang, Xinze Chen, Boyuan Wang, Youyi Zhang, Wenjun Mei, and Xingang Wang. Drivedreamer4d: World models are effective data machines for 4d driving scene representation. 2024. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 611, + 553, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 611, + 553, + 654 + ], + "spans": [ + { + "bbox": [ + 316, + 611, + 553, + 654 + ], + "type": "text", + "content": "[54] Yang Zheng, Adam W. Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J. Guibas. PointOdyssey: A large-scale synthetic dataset for long-term point tracking. In ICCV, 2023. 6, 7, 8, 2" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 102, + 68, + 509, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 68, + 509, + 103 + ], + "spans": [ + { + "bbox": [ + 102, + 68, + 509, + 103 + ], + "type": "text", + "content": "POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "spans": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 141, + 291, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 141, + 291, + 155 + ], + "spans": [ + { + "bbox": [ + 55, + 141, + 291, + 155 + ], + "type": "text", + "content": "A. Pointmap Matching for Global Alignment." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "spans": [ + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "type": "text", + "content": "Given a sequence of video frames, the target of global alignment is to project all pairwise estimated pointmaps to the same global world coordinates. DUSt3R constructs a connectivity pairwise graph and aims to minimize the reprojection error for each image pair globally where the dynamic regions are supposed to be separated from the static regions. To this end, MonST3R [52] further introduces an assistant optical flow network [46] to help mask the dynamic regions and provide a pseudo label of 2D matching for minimizing the re-projection error in static regions. However, the introduced assistant model will introduce inevitable domain gaps and additional computation costs. Besides, the optical flow model is tailored for matching within two adjacent frames, suffering an obvious degeneration with the large view displacement. In POMATO, for an image pair " + }, + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}^i,\\mathbf{I}^j\\}" + }, + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "type": "text", + "content": ", the dynamic mask " + }, + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "type": "inline_equation", + "content": "\\mathbf{D}^{j,i}" + }, + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "type": "text", + "content": " is calculated by comparing the difference between " + }, + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{j,i}" + }, + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_m^{j,i}" + }, + { + "bbox": [ + 55, + 163, + 296, + 369 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 380, + 295, + 394 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 380, + 295, + 394 + ], + "spans": [ + { + "bbox": [ + 117, + 380, + 295, + 394 + ], + "type": "interline_equation", + "content": "\\mathbf {D} ^ {j, i} = \\left| \\left| \\mathbf {X} _ {m} ^ {j, i} - \\mathbf {X} ^ {j, i} \\right| \\right| > \\alpha , \\tag {9}", + "image_path": "7e0a527c5d772d5170207369443f4492fcc8f881d8ee08801d58a55b0d28fec9.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 407, + 295, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 407, + 295, + 431 + ], + "spans": [ + { + "bbox": [ + 55, + 407, + 295, + 431 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 407, + 295, + 431 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 407, + 295, + 431 + ], + "type": "text", + "content": " is a dynamic threshold defined as " + }, + { + "bbox": [ + 55, + 407, + 295, + 431 + ], + "type": "inline_equation", + "content": "3 \\times" + }, + { + "bbox": [ + 55, + 407, + 295, + 431 + ], + "type": "text", + "content": " median " + }, + { + "bbox": [ + 55, + 407, + 295, + 431 + ], + "type": "inline_equation", + "content": "(\\|\\mathbf{X}_m^{j,i} - \\mathbf{X}^{j,i}\\|)" + }, + { + "bbox": [ + 55, + 407, + 295, + 431 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "spans": [ + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "type": "text", + "content": "Given the updated camera intrinsic " + }, + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "type": "inline_equation", + "content": "\\tilde{K}" + }, + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "type": "text", + "content": " after an iteration of optimization, the target matching 2D coordinates " + }, + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_m^{j,i} \\in \\mathbb{R}^{H \\times W \\times 2}" + }, + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "type": "text", + "content": " can be calculated as " + }, + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_m^{j,i} = p(\\tilde{\\mathbf{K}}\\mathbf{X}_m^{j,i})" + }, + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 55, + 433, + 295, + 529 + ], + "type": "text", + "content": " is a mapping from 3D camera coordinates to 2D pixel coordinates. The optical flow loss proposed in MonST3R can thus be modified with our dynamic mask and 2D matching coordinates. Details about the optical flow loss are referred to MonST3R [52]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 545, + 295, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 545, + 295, + 572 + ], + "spans": [ + { + "bbox": [ + 55, + 545, + 295, + 572 + ], + "type": "text", + "content": "B. Fast 3D Reconstruction with video PO-MATO" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "text", + "content": "Given a sequence of images less than the temporal window length of 12 frames, dynamic 3D reconstruction can be obtained by directly estimating the pointmaps of all reference images to the coordinate of the key frame as discussed in the Sec.3.4. Here, we provide more visualization results of this feed-forward manner and demonstrate the effectiveness of introducing the temporal motion module. As shown in Fig.8, directly applying the pairwise reconstruction will suffer from an obvious scale shift among different frames. After the temporal motion module, the consistency within the video sequence obtains an obvious enhancement." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 141, + 443, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 141, + 443, + 155 + ], + "spans": [ + { + "bbox": [ + 314, + 141, + 443, + 155 + ], + "type": "text", + "content": "C. Training Data Details" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 161, + 553, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 161, + 553, + 198 + ], + "spans": [ + { + "bbox": [ + 313, + 161, + 553, + 198 + ], + "type": "text", + "content": "The details about the training datasets can be found in Tab.6. The finetuning procedure of POMATO was conducted exclusively using synthetic training datasets." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 208, + 536, + 222 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 208, + 536, + 222 + ], + "spans": [ + { + "bbox": [ + 313, + 208, + 536, + 222 + ], + "type": "text", + "content": "D. More Visualizations on Dynamic Scenes" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 228, + 554, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 554, + 289 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 554, + 289 + ], + "type": "text", + "content": "We provide more visualizations in Fig. 9 and Fig. 10. MonST3R suffers obvious degeneration when the view displacement is large as reflected by the erroneous pose estimation while POMATO can still provide a consistent camera trajectory." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 96, + 135, + 515, + 219 + ], + "blocks": [ + { + "bbox": [ + 96, + 135, + 515, + 219 + ], + "lines": [ + { + "bbox": [ + 96, + 135, + 515, + 219 + ], + "spans": [ + { + "bbox": [ + 96, + 135, + 515, + 219 + ], + "type": "table", + "html": "
<table><tr><td>Dataset</td><td>Domain</td><td>Scene Type</td><td># of Frames</td><td># of Scenes</td><td>Dynamics</td><td>Ratio</td></tr>
<tr><td>PointOdyssey [54]</td><td>Synthetic</td><td>Indoors & Outdoors</td><td>200k</td><td>131</td><td>Realistic</td><td>57.1%</td></tr>
<tr><td>TartanAir [45]</td><td>Synthetic</td><td>Indoors & Outdoors</td><td>100k</td><td>163</td><td>None</td><td>14.3%</td></tr>
<tr><td>DynamicReplica [21]</td><td>Synthetic</td><td>Indoors</td><td>145k</td><td>524</td><td>Realistic</td><td>14.3%</td></tr>
<tr><td>ParallelDomain4D [40]</td><td>Synthetic</td><td>Outdoors</td><td>750k</td><td>15015</td><td>Driving</td><td>8.6%</td></tr>
<tr><td>Carla [10]</td><td>Synthetic</td><td>Outdoors</td><td>7k</td><td>5</td><td>Driving</td><td>5.7%</td></tr></table>
", + "image_path": "5ae28c25f828dac5cf5304f158000a3d4b15aaa5c22767f9cce24e599bcd63ea.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 228, + 555, + 251 + ], + "lines": [ + { + "bbox": [ + 55, + 228, + 555, + 251 + ], + "spans": [ + { + "bbox": [ + 55, + 228, + 555, + 251 + ], + "type": "text", + "content": "Table 6. An overview of all training datasets and sample ratio. All datasets provide both camera pose, depth, and most of them include dynamic objects." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 59, + 384, + 555, + 624 + ], + "blocks": [ + { + "bbox": [ + 59, + 384, + 555, + 624 + ], + "lines": [ + { + "bbox": [ + 59, + 384, + 555, + 624 + ], + "spans": [ + { + "bbox": [ + 59, + 384, + 555, + 624 + ], + "type": "image", + "image_path": "133a5be75fe9852f50d9dc75a3e28f6a3f8ed363159786f123db3e08abb2c86a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 635, + 555, + 657 + ], + "lines": [ + { + "bbox": [ + 55, + 635, + 555, + 657 + ], + "spans": [ + { + "bbox": [ + 55, + 635, + 555, + 657 + ], + "type": "text", + "content": "Figure 8. Fast 3D reconstruction with our temporal motion module. Given a sequence of images less than temporal window length, our POMATO can directly obtain a global pointmap under the key frame coordinate." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 67, + 112, + 545, + 653 + ], + "blocks": [ + { + "bbox": [ + 67, + 112, + 545, + 653 + ], + "lines": [ + { + "bbox": [ + 67, + 112, + 545, + 653 + ], + "spans": [ + { + "bbox": [ + 67, + 112, + 545, + 653 + ], + "type": "image", + "image_path": "33c6bad035555d5e4e9fdaf94cf6b764db4cf1f575afef326383c07bccb07da0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 85, + 661, + 523, + 673 + ], + "lines": [ + { + "bbox": [ + 85, + 661, + 523, + 673 + ], + "spans": [ + { + "bbox": [ + 85, + 661, + 523, + 673 + ], + "type": "text", + "content": "Figure 9. Compared with MonST3R, our POMATO can provide more complete dynamic masks and consistent geometry." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 106, + 545, + 644 + ], + "blocks": [ + { + "bbox": [ + 62, + 106, + 545, + 644 + ], + "lines": [ + { + "bbox": [ + 62, + 106, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 62, + 106, + 545, + 644 + ], + "type": "image", + "image_path": "1553dd9fea27bacf18cff9e88695799cf7d92dd8244a549641fdbd79c3b38df9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 658, + 555, + 682 + ], + "lines": [ + { + "bbox": [ + 55, + 658, + 555, + 682 + ], + "spans": [ + { + "bbox": [ + 55, + 658, + 555, + 682 + ], + "type": "text", + "content": "Figure 10. MonST3R suffers obvious degeneration when the view displacement is large as reflected by the erroneous pose estimation while POMATO can still provide a consistent camera trajectory." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_content_list.json b/data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..87b00e539b28ed007349d825c648107d273ab087 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_content_list.json @@ -0,0 +1,2550 @@ +[ + { + "type": "text", + "text": "Unified Generative Search and Recommendation", + "text_level": 1, + "bbox": [ + 184, + 101, + 815, + 125 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Teng Shi", + "bbox": [ + 192, + 132, + 267, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Renmin University of China", + "bbox": [ + 135, + 148, + 326, + 164 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 181, + 165, + 279, + 178 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "shiteng@ruc.edu.cn", + "bbox": [ + 163, + 180, + 299, + 194 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jun Xu*", + "bbox": [ + 465, + 132, + 529, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiao Zhang", + "bbox": [ + 450, + 150, + 547, + 166 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Renmin University of China", + "bbox": [ + 405, + 167, + 593, + 181 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 450, + 181, + 547, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{junxu,zhangx89}@ruc.edu.cn", + "bbox": [ + 398, + 196, + 598, + 210 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaoxue Zang", + "bbox": [ + 709, + 132, + 825, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kai Zheng", + "bbox": [ + 723, + 150, + 810, + 167 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kuaishou Technology Co., Ltd.", + "bbox": [ + 665, + 167, + 870, + 181 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 718, + 181, + 815, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "xxic666@126.com", + "bbox": [ + 705, + 196, + 828, + 210 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "zhengk92@gmail.com", + "bbox": [ + 692, + 212, + 841, + 226 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yang Song", + "bbox": [ + 320, + 238, + 406, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kuaishou Technology Co., Ltd.", + "bbox": [ + 261, + 255, + 468, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 316, + 270, + 411, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ys@sonyis.me", + "bbox": [ + 315, + 285, + 411, + 300 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Enyun Yu", + "bbox": [ + 591, + 238, + 671, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Independent", + "bbox": [ + 589, + 255, + 676, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 584, + 270, + 681, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "yuenyun@126.com", + "bbox": [ + 566, + 286, + 697, + 300 + ], + "page_idx": 0 + }, + { + 
"type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 83, + 309, + 156, + 323 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Modern commercial platforms typically offer both search and recommendation functionalities to serve diverse user needs, making joint modeling of these tasks an appealing direction. While prior work has shown that integrating search and recommendation can be mutually beneficial, it also reveals a performance trade-off: enhancements in one task often come at the expense of the other. This challenge arises from their distinct information requirements: search emphasizes semantic relevance between queries and items, whereas recommendation depends more on collaborative signals among users and items. Effectively addressing this trade-off requires tackling two key problems: (1) integrating both semantic and collaborative signals into item representations, and (2) guiding the model to distinguish and adapt to the unique demands of search and recommendation. The emergence of generative retrieval with Large Language Models (LLMs) presents new possibilities. This paradigm encodes items as identifiers and frames both search and recommendation as sequential generation tasks, offering the flexibility to leverage multiple identifiers and task-specific prompts. In light of this, we introduce GenSAR, a unified generative framework for balanced search and recommendation. Our approach designs dual-purpose identifiers and tailored training strategies to incorporate complementary signals and align with task-specific objectives. Experiments on both public and commercial datasets demonstrate that GenSAR effectively reduces the trade-off and achieves state-of-the-art performance on both tasks.", + "bbox": [ + 81, + 327, + 483, + 674 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS Concepts", + "text_level": 1, + "bbox": [ + 83, + 686, + 202, + 700 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Information systems $\\rightarrow$ Recommender systems; Personalization.", + "bbox": [ + 81, + 704, + 483, + 731 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Corresponding authors. Work partially done at Engineering Research Center of Next-Generation Intelligent Search and Recommendation, Ministry of Education. Work done when Teng Shi was the intern at Kuaishou.", + "bbox": [ + 81, + 738, + 482, + 770 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.", + "bbox": [ + 81, + 779, + 480, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 84, + 853, + 354, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2018 Copyright held by the owner/author(s). 
Publication rights licensed to ACM.", + "bbox": [ + 84, + 864, + 472, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM ISBN 978-1-4503-XXXX-X/18/06", + "bbox": [ + 84, + 875, + 264, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://doi.org/XXXXXXXXXXXXXXXXXX", + "bbox": [ + 84, + 883, + 264, + 895 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords", + "text_level": 1, + "bbox": [ + 514, + 309, + 601, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recommendation; Search; Large Language Model", + "bbox": [ + 514, + 328, + 815, + 342 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM Reference Format:", + "text_level": 1, + "bbox": [ + 514, + 356, + 661, + 367 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Teng Shi, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Yang Song, and Enyun Yu. 2018. Unified Generative Search and Recommendation. In Proceedings of Make sure to enter the correct conference title from your rights confirmation emai (Conference acronym 'XX). ACM, New York, NY, USA, 10 pages. https://doi.org/XXXXXXXXX.XXXXXXX", + "bbox": [ + 513, + 368, + 923, + 431 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 514, + 462, + 650, + 476 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To facilitate the diverse ways of information access, many commercial platforms, such as e-commerce, video, and music platforms, offer both search [2, 3, 6, 7] and recommendation [34, 48-52] (S&R) services. This provides an opportunity for joint modeling of S&R, enabling better user interest modeling and enhancing the performance of both tasks.", + "bbox": [ + 511, + 479, + 915, + 561 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Many studies have explored joint modeling of S&R, including: leveraging recommendation to enhance search [2, 3, 6, 7], using search to enhance recommendation [15, 30, 31, 37], and unified S&R modeling [29, 41, 43, 46, 47]. Although these studies have demonstrated that S&R can mutually enhance each other, they have also identified a trade-off when the model serves both tasks simultaneously [29]. Specifically, when the recommendation performance improves, the search performance tends to degrade, and vice versa. Empirical analysis of the representative methods of JSR [46] and UniSAR [29] based on a S&R dataset collected from a real commercial platform also confirmed the performance trade-off, as shown in Figure 1(a). More details please refer to Section 4.1.1.", + "bbox": [ + 511, + 563, + 915, + 729 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Analysis also showed that the trade-off is rooted in the different information requirements of S&R. Search typically focuses more on the semantic relevance between queries and items, with traditional search models often based on pre-trained language models [18, 40, 42]. In contrast, recommendation heavily relies on collaborative information, where ID-based recommendation can yield excellent results [14, 19, 44]. Figure 1(b) shows an empirical validation where the S&R performances with ID- and Text-only embeddings are shown. 
The ID embeddings are randomly initialized and trained, containing collaborative information, while the Text embeddings are trained with BGE [40] and then reduced to the same dimensionality as that of the ID embeddings, containing semantic", + "bbox": [ + 511, + 729, + 915, + 896 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05730v2 [cs.IR] 10 Apr 2025", + "bbox": [ + 22, + 272, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/628b138c6ab45d0641959ce993765f284c74574409b6b4abd42ddca7826121cf.jpg", + "image_caption": [ + "(a) Trade-off between S&R" + ], + "image_footnote": [], + "bbox": [ + 99, + 109, + 277, + 191 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0eb6ab1ddbe1b20f36c0374a24bd3c90cda43ce603162394fc84a167ceba717f.jpg", + "image_caption": [ + "(b) Performance of different embeddings", + "Figure 1: Empirical analysis on the Commercial dataset: (a) A trade-off between S&R is observed in representative joint S&R methods, JSR [46] and UniSAR [29]. (b) The performance of the sequential recommendation model SASRec [19] and the product search model QEM [2], using ID and text embeddings, respectively." + ], + "image_footnote": [], + "bbox": [ + 285, + 109, + 467, + 191 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "information. From Figure 1(b), we found that recommendation relies more on collaborative information while search focuses more on semantic information.", + "bbox": [ + 81, + 314, + 482, + 354 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Therefore, balancing the semantic information required for search and the collaborative information needed for recommendation becomes a key issue in joint S&R modeling. It is non-trivial and faces two major challenges: (1) How to incorporate both semantic and collaborative information in item representations. Existing joint S&R models typically assign a single representation to each item, making it difficult to capture both types of information effectively; (2) How to let the model understand the difference in information requirements of S&R during training. Current joint models often treat S&R tasks identically, without differentiating them during training. This makes it challenging for the model to grasp their distinct requirements.", + "bbox": [ + 81, + 356, + 482, + 521 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, Large Language Model (LLM) [55]-based generative retrieval for search [35, 59] and recommendation [11, 26, 56] have garnered significant attention. This provides a solution to the aforementioned challenges: (1) Generative retrieval assigns an identifier (a sequence of tokens) to each item, allowing us to assign multiple identifiers to each item to balance semantic and collaborative information; (2) Generative retrieval formulates both S&R as sequence-to-sequence (Seq2Seq) tasks, enabling the unification of different S&R tasks and helping the model better understand the distinct requirements of each task.", + "bbox": [ + 81, + 521, + 482, + 660 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Based on this, we propose GenSAR, which unifies balanced search and recommendation through generative retrieval, thereby alleviating the trade-off between S&R to better enhance each other. Firstly, we design a joint S&R identifier that integrates both semantic and collaborative information. 
Building on the RQ-VAE [26, 56] method, we employ shared codebooks for both semantic and collaborative information, alongside specific codebooks for each. As a result, items from search are represented by semantic codes, while items from recommendation are represented by collaborative codes. These two codes share a common portion to capture shared information while also retaining distinct parts to preserve the unique characteristics of semantic and collaborative information. Secondly, we design the joint S&R training tasks. We prepend a token representing the behavior type to the item identifier and then input the user's S&R history into the LLM (with the user query also provided for search). Different prompts are used to guide LLMs to predict the next recommended item, the next searched query,", + "bbox": [ + 81, + 660, + 482, + 896 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "and the next searched item, enabling the model to understand the distinct requirements for S&R.", + "bbox": [ + 513, + 106, + 911, + 133 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The major contributions of the paper are summarized as follows: We verified the existence of the trade-off between S&R, and identified that this trade-off arises from the different information requirements of S&R. Additionally, we have analyzed the challenges in balancing semantic and collaborative information needed for S&R.", + "bbox": [ + 513, + 133, + 913, + 202 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We propose GenSAR, which unifies balanced S&R through generative retrieval. We designed a joint S&R identifier to balance semantic and collaborative information, and developed joint training tasks to help the model understand the different requirements of each task.", + "bbox": [ + 513, + 203, + 913, + 271 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Experimental results on two datasets validate the effectiveness of GenSAR. GenSAR not only surpasses traditional S&R models but also outperforms generative S&R models.", + "bbox": [ + 513, + 272, + 911, + 314 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 514, + 325, + 658, + 340 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Joint Search and Recommendation. Joint modeling of S&R has attracted increasing attention in recent years and can be broadly categorized into three types: (1) Enhancing search with recommendation [2, 3, 6, 7], such as TEM [6], which uses Transformers to model user preferences, and CoPPS [7], which applies contrastive learning to address data sparsity. (2) Enhancing recommendation with search [15, 30, 31, 37], e.g., SESRec [31], which disentangles similar and dissimilar interests from both histories. (3) Unified modeling of S&R [29, 41, 43, 46, 47, 53, 54], such as JSR [46, 47] with joint loss and UniSAR [29], which models behavior transitions. While these works show mutual benefits between S&R, they also reveal a trade-off [28, 29]. This paper addresses that trade-off within a generative retrieval framework.", + "bbox": [ + 511, + 348, + 913, + 527 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Generative Search and Recommendation. With the rise of Large Language Models (LLMs) [55], LLM-based generative retrieval has been widely explored for both search [5, 21, 33, 35, 38, 58, 59] and recommendation [11, 17, 25, 26, 56]. 
These methods represent items as identifiers and input the user query (for search) or user history (for recommendation) into the LLM to generate the target item. Identifier designs can be grouped into: (1) Text-based, using item titles [8, 23] or substrings [5, 22]; (2) Non-learnable ID-based, with early methods assigning random IDs [11], and later ones using clustering to encode semantic or collaborative structure [17, 35, 38]; (3) Learnable codebook-based, applying techniques like RQ-VAE [26, 56] to learn identifiers from semantic or collaborative embeddings. However, most existing approaches design identifiers tailored to either search or recommendation, focusing solely on semantic or collaborative information. In joint S&R, balancing both is essential for strong performance across tasks.", + "bbox": [ + 511, + 532, + 913, + 753 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3 Our Approach", + "text_level": 1, + "bbox": [ + 514, + 766, + 663, + 782 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This section introduces our proposed method, GenSAR. Section 3.1 defines the Joint Search and Recommendation task. Section 3.2 presents the Joint Identifier module, where we design separate semantic and collaborative identifiers to balance the different needs of search and recommendation. Section 3.3 describes task-specific training objectives to help the model capture both types of information. Finally, Section 3.4 details the training and inference process of GenSAR.", + "bbox": [ + 511, + 785, + 913, + 893 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 84, + 75, + 366, + 87 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Teng Shi et al.", + "bbox": [ + 841, + 75, + 911, + 87 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/757f11261ee325c692b5f00503a37aa68878e51f8dce743a82fe3fffcbf18636.jpg", + "image_caption": [ + "Figure 2: The joint search and recommendation identifier. We extract the semantic and collaborative embeddings for each item. These two embeddings are first concatenated and passed through the shared codebooks to learn shared codes. Then, the semantic and collaborative embeddings are separately processed through specific codebooks to learn specific codes. Finally, these codes are concatenated to form two identifiers for each item: one for semantics and one for collaboration." + ], + "image_footnote": [], + "bbox": [ + 106, + 104, + 890, + 375 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Problem Formulation", + "text_level": 1, + "bbox": [ + 83, + 440, + 303, + 454 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Let $\\mathcal{U},\\mathcal{V},Q$ denote the sets of users, items, and queries, respectively. Each user $u\\in \\mathcal{U}$ has a chronologically ordered interaction history $S_{u} = [(b_{1},x_{1}),(b_{2},x_{2}),\\ldots ,(b_{N},x_{N})]$ that includes her historical S&R behaviors, where $N$ denotes the number of $u$ 's historical behaviors. $b_{i}\\in \\{\\langle \\mathrm{R}_{\\mathrm{I}}\\rangle ,\\langle \\mathrm{S}_{\\mathrm{Q}}\\rangle ,\\langle \\mathrm{S}_{\\mathrm{I}}\\rangle \\}$ represents the type of the $i$ -th behavior: $\\langle \\mathrm{R_I}\\rangle$ indicates an item clicked by the user after a recommendation, $\\langle \\mathrm{S_Q}\\rangle$ represents a query searched by the user, and $\\langle \\mathrm{S_I}\\rangle$ denotes an item clicked by the user after searching a query. 
$x_{i}$ denotes the $i$-th behavior:",
"bbox": [81, 458, 482, 580],
"page_idx": 2
},
{
"type": "equation",
"text": "\n$$\nx_{i} = \\left\\{ \\begin{array}{ll} v_{i}, & \\text{if } b_{i} = \\langle \\mathrm{R}_{\\mathrm{I}} \\rangle \\text{ or } b_{i} = \\langle \\mathrm{S}_{\\mathrm{I}} \\rangle, \\\\ q_{i}, & \\text{if } b_{i} = \\langle \\mathrm{S}_{\\mathrm{Q}} \\rangle, \\end{array} \\right. \\tag{1}\n$$\n",
"text_format": "latex",
"bbox": [173, 580, 480, 617],
"page_idx": 2
},
{
"type": "text",
"text": "where $v_{i} \in \mathcal{V}$ denotes the $i$-th interacted item and $q_{i} \in Q$ is the $i$-th searched query. Our goal is to enable the model to understand user interests and predict the next item $v_{N+1}$ for search when $b_{N+1} = \langle \mathrm{S}_{\mathrm{I}} \rangle$ or recommendation when $b_{N+1} = \langle \mathrm{R}_{\mathrm{I}} \rangle$.",
"bbox": [81, 619, 482, 676],
"page_idx": 2
},
{
"type": "text",
"text": "3.2 Joint Search and Recommendation Identifier",
"text_level": 1,
"bbox": [81, 686, 482, 704],
"page_idx": 2
},
{
"type": "text",
"text": "This section introduces the design of the joint S&R identifier (Figure 2). We first extract semantic and collaborative embeddings for each item. Using RQ-VAE [20, 26, 56], we apply both shared and separate codebooks to learn two identifiers per item: one semantic, one collaborative. The identifiers share common parts to capture shared information, while retaining unique parts to reflect task-specific features.",
"bbox": [81, 705, 482, 804],
"page_idx": 2
},
{
"type": "text",
"text": "3.2.1 Embedding Extraction. For each item $v \in \mathcal{V}$, we can input its textual information, such as the title and description, into a pre-trained retrieval model (e.g., BERT [10], BGE [40]) to obtain an embedding $\mathbf{v}_s \in \mathbb{R}^{d_s}$ that contains its semantic information. Meanwhile, we can also obtain an embedding $\mathbf{v}_c \in \mathbb{R}^{d_c}$ containing its collaborative information from a pre-trained recommendation",
"bbox": [81, 811, 482, 896],
"page_idx": 2
},
{
"type": "text",
"text": "model (e.g., SASRec [19], BERT4Rec [32]). $d_{s}$ and $d_{c}$ represent the dimensions of the semantic and collaborative embeddings, respectively. We map the semantic and collaborative embeddings to the same-dimensional latent space using two encoders:",
"bbox": [513, 440, 915, 497],
"page_idx": 2
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{z}_{s} = \\operatorname{Encoder}_{s}(\\mathbf{v}_{s}), \\quad \\mathbf{z}_{c} = \\operatorname{Encoder}_{c}(\\mathbf{v}_{c}), \\tag{2}\n$$\n",
"text_format": "latex",
"bbox": [586, 521, 913, 536],
"page_idx": 2
},
{
"type": "text",
"text": "where $\mathbf{z}_s \in \mathbb{R}^d, \mathbf{z}_c \in \mathbb{R}^d$ and $d$ is the dimension of the latent embeddings, $\mathrm{Encoder}_s(\cdot)$ and $\mathrm{Encoder}_c(\cdot)$ are two MLPs (Multilayer Perceptrons).",
"bbox": [511, 559, 915, 602],
"page_idx": 2
},
{
"type": "text",
"text": "3.2.2 Residual Quantization. To integrate both semantic and collaborative information, we use $L_{m}$-level shared codebooks, along with $L_{n}$-level specific codebooks for semantic and collaborative information, respectively. 
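Before walking through the quantization, note that Eq. (2) amounts to two small projection networks. Below is a minimal PyTorch-style sketch; the hidden width, depth, and example dimensions are our illustrative assumptions, not values specified in the paper.

```python
import torch
import torch.nn as nn

# Sketch of Eq. (2): two MLP encoders mapping the semantic embedding v_s
# (d_s-dim, e.g., from BGE) and the collaborative embedding v_c (d_c-dim,
# e.g., from SASRec) into the same d-dimensional latent space.
# Hidden width and depth are illustrative assumptions.
def make_encoder(in_dim: int, d: int, hidden: int = 512) -> nn.Module:
    return nn.Sequential(nn.Linear(in_dim, hidden), nn.ReLU(), nn.Linear(hidden, d))

d_s, d_c, d = 768, 64, 128        # assumed example dimensions
encoder_s = make_encoder(d_s, d)  # Encoder_s(.)
encoder_c = make_encoder(d_c, d)  # Encoder_c(.)

v_s, v_c = torch.randn(1, d_s), torch.randn(1, d_c)
z_s, z_c = encoder_s(v_s), encoder_c(v_c)  # z_s, z_c in R^d
```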
First, the latent embeddings for semantic and collaborative information, $\mathbf{z}_s$ and $\mathbf{z}_c$, are concatenated to form $\mathbf{r}_0^m = [\mathbf{z}_s; \mathbf{z}_c] \in \mathbb{R}^{2d}$. This $\mathbf{r}_0^m$ is then passed through the $L_{m}$-level shared codebooks to obtain the shared codes $I_m$ and the residual embedding $\mathbf{r}_{L_m}^m$. Then, we extract the semantic part $\mathbf{r}_0^s = \mathbf{r}_{L_m}^m [1:d] \in \mathbb{R}^d$ and the collaborative part $\mathbf{r}_0^c = \mathbf{r}_{L_m}^m [d:2d] \in \mathbb{R}^d$ from $\mathbf{r}_{L_m}^m$, and input them separately into the semantic and collaborative codebooks to learn their specific codes $I_s$ and $I_c$, respectively. Finally, the shared and specific codes are concatenated, resulting in two identifiers, $I_{m+s}$ and $I_{m+c}$, for each item. Next, we will introduce the residual quantization process for both the shared and specific codebooks.",
"bbox": [513, 619, 915, 833],
"page_idx": 2
},
{
"type": "text",
"text": "- Shared Codebooks. We have $L_{m}$-level shared codebooks. At each level $i \in \{1, 2, \dots, L_{m}\}$, we have a shared codebook $C_{i}^{m} = \{\mathbf{e}_{k}\}_{k=1}^{K}$ where $K$ is the size of each codebook and $\mathbf{e}_{k} \in \mathbb{R}^{2d}$ is a learnable code embedding. The residual quantization process for the shared",
"bbox": [511, 837, 913, 896],
"page_idx": 2
},
{
"type": "header",
"text": "Unified Generative Search and Recommendation",
"bbox": [84, 75, 318, 85],
"page_idx": 2
},
{
"type": "header",
"text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
"bbox": [630, 75, 913, 87],
"page_idx": 2
},
{
"type": "text",
"text": "codebooks is as follows:",
"bbox": [81, 106, 230, 119],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\nc_{i}^{m} = \\underset{k}{\\arg\\min} \\left\\| \\mathbf{r}_{i-1}^{m} - \\mathbf{e}_{k} \\right\\|_{2}^{2}, \\quad \\mathbf{e}_{k} \\in C_{i}^{m},\n$$\n",
"text_format": "latex",
"bbox": [160, 122, 401, 148],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{r}_{i}^{m} = \\mathbf{r}_{i-1}^{m} - \\mathbf{e}_{c_{i}^{m}}, \\quad \\mathbf{r}_{0}^{m} = [\\mathbf{z}_{s}; \\mathbf{z}_{c}] \\in \\mathbb{R}^{2d}, \\tag{3}\n$$\n",
"text_format": "latex",
"bbox": [163, 148, 401, 167],
"page_idx": 3
},
{
"type": "text",
"text": "where $c_{i}^{m}$ is the assigned code from the $i$-th level of the shared codebook and $\mathbf{r}_{i-1}^{m}$ is the residual from the previous level. Through the recursive quantization in Eq. (3), we can obtain the shared codes $I_{m} = \left[c_{1}^{m}, c_{2}^{m}, \ldots, c_{L_{m}}^{m}\right]$ and the residual embedding $\mathbf{r}_{L_{m}}^{m}$.",
"bbox": [81, 171, 482, 234],
"page_idx": 3
},
{
"type": "text",
"text": "- Specific Codebooks. We can extract the semantic part $\mathbf{r}_0^s = \mathbf{r}_{L_m}^m [1:d] \in \mathbb{R}^d$ and the collaborative part $\mathbf{r}_0^c = \mathbf{r}_{L_m}^m [d:2d] \in \mathbb{R}^d$ from the residual embedding $\mathbf{r}_{L_m}^m$ produced by the shared codebooks. We then pass them separately through the $L_n$-level semantic and collaborative specific codebooks $C_i^s$ and $C_i^c$, where $i \in \{1, 2, \dots, L_n\}$. 
Please note that, unlike the shared codebooks whose code embeddings are $2d$-dimensional, the code embeddings of the specific codebooks are $d$-dimensional. The residual quantization process for the specific codebooks can be formulated as follows:",
"bbox": [81, 237, 483, 378],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\nc_{i}^{s} = \\underset{k}{\\arg\\min} \\left\\| \\mathbf{r}_{i-1}^{s} - \\mathbf{e}_{k} \\right\\|_{2}^{2}, \\quad \\mathbf{e}_{k} \\in C_{i}^{s},\n$$\n",
"text_format": "latex",
"bbox": [166, 377, 395, 401],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\nc_{i}^{c} = \\underset{k}{\\arg\\min} \\left\\| \\mathbf{r}_{i-1}^{c} - \\mathbf{e}_{k} \\right\\|_{2}^{2}, \\quad \\mathbf{e}_{k} \\in C_{i}^{c}, \\tag{4}\n$$\n",
"text_format": "latex",
"bbox": [166, 402, 480, 429],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{r}_{i}^{s} = \\mathbf{r}_{i-1}^{s} - \\mathbf{e}_{c_{i}^{s}}, \\quad \\mathbf{r}_{i}^{c} = \\mathbf{r}_{i-1}^{c} - \\mathbf{e}_{c_{i}^{c}},\n$$\n",
"text_format": "latex",
"bbox": [166, 431, 370, 448],
"page_idx": 3
},
{
"type": "text",
"text": "where $c_i^s$ and $c_i^c$ represent the codes assigned by the $i$-th level semantic-specific and collaborative-specific codebooks, respectively. Through the recursive quantization in Eq. (4), we can obtain the semantic-specific and collaborative-specific codes as follows:",
"bbox": [81, 449, 482, 503],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\nI_{s} = \\left[ c_{1}^{s}, c_{2}^{s}, \\dots, c_{L_{n}}^{s} \\right], \\quad I_{c} = \\left[ c_{1}^{c}, c_{2}^{c}, \\dots, c_{L_{n}}^{c} \\right].\n$$\n",
"text_format": "latex",
"bbox": [143, 508, 418, 532],
"page_idx": 3
},
{
"type": "text",
"text": "Finally, by concatenating the shared codes and the specific codes, we can obtain the semantic identifier $I_{m + s}$ and collaborative identifier $I_{m + c}$ for item $v$:",
"bbox": [81, 535, 482, 575],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\nI_{m + s} = \\left[ c_{1}^{m}, c_{2}^{m}, \\dots, c_{L_{m}}^{m}, c_{1}^{s}, c_{2}^{s}, \\dots, c_{L_{n}}^{s} \\right],\n$$\n",
"text_format": "latex",
"bbox": [158, 578, 403, 602],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\nI_{m + c} = \\left[ c_{1}^{m}, c_{2}^{m}, \\dots, c_{L_{m}}^{m}, c_{1}^{c}, c_{2}^{c}, \\dots, c_{L_{n}}^{c} \\right]. \\tag{5}\n$$\n",
"text_format": "latex",
"bbox": [161, 603, 480, 628],
"page_idx": 3
},
{
"type": "text",
"text": "3.2.3 Identifier Training. 
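Before turning to the training losses, the full quantization pipeline of Eqs. (3)-(5) can be summarized in a few lines. This is a minimal sketch with randomly initialized codebooks and illustrative sizes; in the model the code embeddings are learnable parameters trained with the losses below.

```python
import torch

def quantize(r: torch.Tensor, codebooks: list) -> tuple:
    """Residual quantization (Eqs. (3)-(4)): at each level, pick the nearest
    code embedding, record its index, and subtract it from the residual."""
    codes = []
    for C in codebooks:                              # C has shape (K, dim)
        idx = torch.cdist(r, C).argmin(dim=-1)       # arg min_k ||r - e_k||_2^2
        codes.append(int(idx))
        r = r - C[idx]                               # residual for the next level
    return codes, r

d, K, L_m, L_n = 128, 256, 2, 2                          # illustrative sizes
shared = [torch.randn(K, 2 * d) for _ in range(L_m)]     # shared codebooks C_i^m
semantic = [torch.randn(K, d) for _ in range(L_n)]       # specific codebooks C_i^s
collab = [torch.randn(K, d) for _ in range(L_n)]         # specific codebooks C_i^c

z_s, z_c = torch.randn(1, d), torch.randn(1, d)          # latent embeddings, Eq. (2)
I_m, r_m = quantize(torch.cat([z_s, z_c], dim=-1), shared)  # r_0^m = [z_s; z_c]
I_s, _ = quantize(r_m[:, :d], semantic)              # semantic part of the residual
I_c, _ = quantize(r_m[:, d:], collab)                # collaborative part
I_ms, I_mc = I_m + I_s, I_m + I_c                    # the two identifiers of Eq. (5)
```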
After passing through the shared and specific codebooks, we can obtain the semantic and collaborative quantized embeddings as follows:",
"bbox": [81, 633, 482, 674],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\hat{\\mathbf{z}}_{s} = \\sum_{i=1}^{L_{m}} \\mathbf{e}_{c_{i}^{m}}[1:d] + \\sum_{i=1}^{L_{n}} \\mathbf{e}_{c_{i}^{s}}, \\quad \\hat{\\mathbf{z}}_{c} = \\sum_{i=1}^{L_{m}} \\mathbf{e}_{c_{i}^{m}}[d:2d] + \\sum_{i=1}^{L_{n}} \\mathbf{e}_{c_{i}^{c}}, \\tag{6}\n$$\n",
"text_format": "latex",
"bbox": [94, 678, 482, 715],
"page_idx": 3
},
{
"type": "text",
"text": "where $\mathbf{e}_{c_i^m} \in \mathbb{R}^{2d}$ is the code embedding of the shared codebooks, $\mathbf{e}_{c_i^s} \in \mathbb{R}^d$ and $\mathbf{e}_{c_i^c} \in \mathbb{R}^d$ are the code embeddings of the semantic and collaborative specific codebooks. The quantized semantic embedding $\hat{\mathbf{z}}_s \in \mathbb{R}^d$ and collaborative embedding $\hat{\mathbf{z}}_c \in \mathbb{R}^d$ will be used to reconstruct the original semantic and collaborative embeddings, $\mathbf{v}_s$ and $\mathbf{v}_c$:",
"bbox": [81, 720, 482, 810],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\hat{\\mathbf{v}}_{s} = \\operatorname{Decoder}_{s}(\\hat{\\mathbf{z}}_{s}), \\quad \\hat{\\mathbf{v}}_{c} = \\operatorname{Decoder}_{c}(\\hat{\\mathbf{z}}_{c}), \\tag{7}\n$$\n",
"text_format": "latex",
"bbox": [156, 815, 482, 830],
"page_idx": 3
},
{
"type": "text",
"text": "where $\mathrm{Decoder}_s(\cdot)$ and $\mathrm{Decoder}_c(\cdot)$ are two MLPs. We can compute the reconstruction loss used for training the encoder and decoder as follows:",
"bbox": [81, 835, 482, 875],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}_{\\text{Recon}} = \\left\\| \\mathbf{v}_{s} - \\hat{\\mathbf{v}}_{s} \\right\\|_{2}^{2} + \\left\\| \\mathbf{v}_{c} - \\hat{\\mathbf{v}}_{c} \\right\\|_{2}^{2}. \\tag{8}\n$$\n",
"text_format": "latex",
"bbox": [174, 880, 482, 897],
"page_idx": 3
},
{
"type": "image",
"img_path": "images/6f6f1922647d9249aee68c7d995b2b82222e7b5d1b014f595f282014536b00fc.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [526, 104, 898, 236],
"page_idx": 3
},
{
"type": "image",
"img_path": "images/9edd3d9b02c30b3b1ca9e7ca1a76fa2f37f746d23bd32759acf9aefd1337c887.jpg",
"image_caption": [
"Figure 3: Training and Inference Process of GenSAR. During training, we provide the LLM with different instructions to generate corresponding responses. During inference, we append a token at the end of the instruction to indicate the type of behavior to be predicted, enabling the LLM to be applied to either search or recommendation tasks."
+ ],
"image_footnote": [],
"bbox": [527, 247, 898, 335],
"page_idx": 3
},
{
"type": "text",
"text": "We can also compute the loss for residual quantization as follows:",
"bbox": [514, 433, 911, 448],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}_{\\mathrm{RQ}}^{m} = \\sum_{i=1}^{L_{m}} \\| \\mathrm{sg}[\\mathbf{r}_{i-1}^{m}] - \\mathbf{e}_{c_{i}^{m}} \\|_{2}^{2} + \\alpha \\| \\mathbf{r}_{i-1}^{m} - \\mathrm{sg}[\\mathbf{e}_{c_{i}^{m}}] \\|_{2}^{2},\n$$\n",
"text_format": "latex",
"bbox": [553, 453, 874, 491],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}_{\\mathrm{RQ}}^{s} = \\sum_{i=1}^{L_{n}} \\| \\mathrm{sg}[\\mathbf{r}_{i-1}^{s}] - \\mathbf{e}_{c_{i}^{s}} \\|_{2}^{2} + \\alpha \\| \\mathbf{r}_{i-1}^{s} - \\mathrm{sg}[\\mathbf{e}_{c_{i}^{s}}] \\|_{2}^{2}, \\tag{9}\n$$\n",
"text_format": "latex",
"bbox": [553, 494, 911, 532],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}_{\\mathrm{RQ}}^{c} = \\sum_{i=1}^{L_{n}} \\| \\mathrm{sg}[\\mathbf{r}_{i-1}^{c}] - \\mathbf{e}_{c_{i}^{c}} \\|_{2}^{2} + \\alpha \\| \\mathbf{r}_{i-1}^{c} - \\mathrm{sg}[\\mathbf{e}_{c_{i}^{c}}] \\|_{2}^{2},\n$$\n",
"text_format": "latex",
"bbox": [553, 535, 864, 571],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}_{\\mathrm{RQ}} = \\mathcal{L}_{\\mathrm{RQ}}^{m} + \\mathcal{L}_{\\mathrm{RQ}}^{s} + \\mathcal{L}_{\\mathrm{RQ}}^{c},\n$$\n",
"text_format": "latex",
"bbox": [555, 574, 718, 592],
"page_idx": 3
},
{
"type": "text",
"text": "where $\mathrm{sg}[\cdot]$ denotes the stop-gradient operation and $\alpha$ is a hyperparameter. $\mathcal{L}_{\mathrm{RQ}}$ is used to train the code embeddings in both the shared and specific codebooks. Finally, the total loss for training the identifier is as follows:",
"bbox": [511, 598, 913, 652],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}_{\\mathrm{RQ-VAE}} = \\mathcal{L}_{\\mathrm{Recon}} + \\mathcal{L}_{\\mathrm{RQ}}. \\tag{10}\n$$\n",
"text_format": "latex",
"bbox": [632, 661, 911, 676],
"page_idx": 3
},
{
"type": "text",
"text": "3.2.4 Behavior-aware Identifier. After learning the semantic and collaborative identifiers for each item, we can represent each user interaction $(b_{i},x_{i})$ as shown in Eq. (1). To help the model understand different behaviors in the user's interaction history, we prepend a token indicating the behavior type to each interaction's identifier. For interactions involving items, we prepend the corresponding behavior token to the identifier of each item. For interactions involving queries, we prepend the behavior token to the word sequence of the query. 
It can be formulated as follows:",
"bbox": [513, 683, 913, 796],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\operatorname{ID}(b_{i}, x_{i}) = \\left\\{ \\begin{array}{ll} \\left[ \\langle \\mathrm{R}_{\\mathrm{I}} \\rangle, c_{1}^{m}, c_{2}^{m}, \\dots, c_{L_{m}}^{m}, c_{1}^{c}, c_{2}^{c}, \\dots, c_{L_{n}}^{c} \\right], & \\text{if } b_{i} = \\langle \\mathrm{R}_{\\mathrm{I}} \\rangle, \\\\ \\left[ \\langle \\mathrm{S}_{\\mathrm{Q}} \\rangle, w_{1}, w_{2}, \\dots, w_{|q_{i}|} \\right], & \\text{if } b_{i} = \\langle \\mathrm{S}_{\\mathrm{Q}} \\rangle, \\\\ \\left[ \\langle \\mathrm{S}_{\\mathrm{I}} \\rangle, c_{1}^{m}, c_{2}^{m}, \\dots, c_{L_{m}}^{m}, c_{1}^{s}, c_{2}^{s}, \\dots, c_{L_{n}}^{s} \\right], & \\text{if } b_{i} = \\langle \\mathrm{S}_{\\mathrm{I}} \\rangle, \\end{array} \\right. \\tag{11}\n$$\n",
"text_format": "latex",
"bbox": [522, 801, 911, 867],
"page_idx": 3
},
{
"type": "text",
"text": "where $\left[w_{1},w_{2},\dots,w_{|q_{i}|}\right]$ are the words of query $q_{i}$. $\mathrm{ID}(\cdot)$ denotes the function for obtaining the identifier of each interaction.",
"bbox": [514, 867, 911, 896],
"page_idx": 3
},
{
"type": "header",
"text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
"bbox": [83, 75, 367, 87],
"page_idx": 3
},
{
"type": "header",
"text": "Teng Shi et al.",
"bbox": [841, 75, 911, 87],
"page_idx": 3
},
{
"type": "text",
"text": "3.3 Joint Search and Recommendation Training",
"text_level": 1,
"bbox": [83, 104, 480, 121],
"page_idx": 4
},
{
"type": "text",
"text": "To better adapt the LLM to joint S&R tasks, we design training objectives that help it understand user behaviors and effectively learn both semantic and collaborative identifiers.",
"bbox": [81, 125, 482, 165],
"page_idx": 4
},
{
"type": "text",
"text": "3.3.1 Next Recommendation Item Prediction. To enable the LLM to perform well on the recommendation task, we let it predict the next recommended item. Unlike previous generative recommendation models [11, 26, 56] that only use the user's recommendation history, our approach incorporates search history as well. This allows the LLM to better leverage the user's historical information and understand the relationship between S&R behaviors. 
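To make the input format concrete, the following sketch shows how a mixed S&R history could be serialized into such an instruction-response pair, following the behavior-aware identifiers of Eq. (11). The literal token spellings (e.g. "<R_I>", "<M_1_247>") are our assumptions about how the special tokens are rendered, not the exact vocabulary.

```python
# Sketch: serializing a user's mixed S&R history into a training sample.
# Behavior tokens and identifier codes follow Eq. (11); the literal token
# spellings below are illustrative assumptions.
R_I, S_Q, S_I = "<R_I>", "<S_Q>", "<S_I>"

def render(behavior: str, x) -> str:
    # A query is rendered as its behavior token plus the raw words;
    # an item is rendered as its behavior token plus its identifier codes.
    return f"{behavior} {x}" if behavior == S_Q else " ".join([behavior] + x)

history = [
    (S_Q, "Piano"),
    (S_I, ["<M_1_247>", "<M_2_197>", "<S_1_184>", "<S_2_110>"]),
]
instruction = ("Below is the user's interaction history: "
               + "; ".join(render(b, x) for b, x in history)
               + f"; {R_I}. Please recommend the next item the user is likely to click.")
response = " ".join([R_I, "<M_1_10>", "<M_2_25>", "<R_1_52>", "<R_2_37>"])
```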
A sample of the data is shown below:",
"bbox": [81, 185, 483, 297],
"page_idx": 4
},
{
"type": "text",
"text": "Next Recommendation Item Prediction",
"text_level": 1,
"bbox": [109, 319, 346, 332],
"page_idx": 4
},
{
"type": "text",
"text": "Instruction: Below is the user's interaction history: $\langle \mathrm{S_Q}\rangle$",
"bbox": [107, 343, 455, 358],
"page_idx": 4
},
{
"type": "text",
"text": "Piano; $\langle \mathrm{S_I}\rangle < \mathrm{M_{1\_}247} > < \mathrm{M_{2\_}197} > < \mathrm{S_{1\_}184} > < \mathrm{S_{2\_}110}>$",
"bbox": [109, 358, 455, 371],
"page_idx": 4
},
{
"type": "text",
"text": "...; $\langle \mathrm{R_I}\rangle$.",
"bbox": [109, 371, 455, 385],
"page_idx": 4
},
{
"type": "text",
"text": "Please recommend the next item the user is likely to click.",
"bbox": [109, 386, 455, 398],
"page_idx": 4
},
{
"type": "text",
"text": "Response: $\langle \mathrm{R_I}\rangle < \mathrm{M_{1\_}10} > < \mathrm{M_{2\_}25} > < \mathrm{R_{1\_}52} > < \mathrm{R_{2\_}37}>$",
"bbox": [109, 400, 437, 412],
"page_idx": 4
},
{
"type": "text",
"text": "Here, $< \mathrm{M_{1\_}10} > < \mathrm{M_{2\_}25} >$ represents the shared semantic and collaborative identifier of the item, $< \mathrm{S_{1\_}184} > < \mathrm{S_{2\_}110} >$ represents the semantic-specific identifier, and $< \mathrm{R_{1\_}52} > < \mathrm{R_{2\_}37} >$ represents the collaborative-specific identifier.",
"bbox": [81, 439, 482, 494],
"page_idx": 4
},
{
"type": "text",
"text": "3.3.2 Next Search Query Prediction. Some works focus on query recommendation [4, 12, 39], where they predict the next query a user is likely to search. Since our user interaction history also includes search queries, we introduce a task that allows the LLM to predict the user's next intended search query based on their history. This helps the model better understand user search intent and the relationship between S&R behaviors. A sample of the data for this task is as follows:",
"bbox": [81, 515, 482, 626],
"page_idx": 4
},
{
"type": "text",
"text": "Next Search Query Prediction",
"text_level": 1,
"bbox": [109, 643, 292, 657],
"page_idx": 4
},
{
"type": "text",
"text": "Instruction: Below is the user's interaction history: $\langle \mathrm{R_I}\rangle$",
"bbox": [107, 669, 455, 684],
"page_idx": 4
},
{
"type": "text",
"text": "$< \mathrm{M_{1\_}199}>$ $< \mathrm{M_{2\_}175}>$ $< \mathrm{R_{1\_}1}>$ $< \mathrm{R_{2\_}44}>$; $\langle \mathrm{R_I}\rangle < \mathrm{M_{1\_}209}>$",
"bbox": [107, 684, 455, 696],
"page_idx": 4
},
{
"type": "text",
"text": "$< \mathrm{M_{2\_}235}>$;...;",
"bbox": [107, 698, 455, 710],
"page_idx": 4
},
{
"type": "text",
"text": "$< \mathrm{M_{2\_}68}> < \mathrm{R_{1\_}118}> < \mathrm{R_{2\_}85}>$. Please predict the next query the user might want to search.",
"bbox": [107, 712, 455, 739],
"page_idx": 4
},
{
"type": "text",
"text": "Response: $\langle \mathrm{S_Q}\rangle$ Artificial Intelligence",
"bbox": [109, 739, 341, 753],
"page_idx": 4
},
{
"type": "text",
"text": "3.3.3 Next Search Item Prediction. 
To enable the model to perform well on the search task, we have it predict the next search item. Previous generative search models [35, 59] only input the user's query into the LLM to predict the target item, which considers only the correlation between the query and the item, without taking the user's preferences into account. To address this, we include the user's S&R history in the input to reflect their preferences. A sample of the data for this task is as follows:",
"bbox": [81, 785, 482, 896],
"page_idx": 4
},
{
"type": "text",
"text": "Next Search Item Prediction",
"text_level": 1,
"bbox": [540, 107, 714, 119],
"page_idx": 4
},
{
"type": "text",
"text": "Instruction: Below is the user's interaction history: $\langle \mathrm{R_I}\rangle$",
"bbox": [539, 131, 888, 146],
"page_idx": 4
},
{
"type": "text",
"text": "$< \mathrm{M_{1\_}199}>$ $< \mathrm{M_{2\_}175}>$ $< \mathrm{R_{1\_}1}>$ $< \mathrm{R_{2\_}44}>$; $\langle \mathrm{R_I}\rangle < \mathrm{M_{1\_}209}>$",
"bbox": [540, 146, 888, 159],
"page_idx": 4
},
{
"type": "text",
"text": "$< \mathrm{M_{2\_}235}>$;...;",
"bbox": [540, 160, 888, 172],
"page_idx": 4
},
{
"type": "text",
"text": "$< \mathrm{M_{2\_}68}> < \mathrm{R_{1\_}118}> < \mathrm{R_{2\_}85}>$. The user's search query is $\langle \mathrm{S_Q}\rangle$ Artificial Intelligence. Please predict the next item the user might click.",
"bbox": [540, 174, 887, 215],
"page_idx": 4
},
{
"type": "text",
"text": "Response: $\langle \mathrm{S_I}\rangle < \mathrm{M_{1\_}23} > < \mathrm{M_{2\_}42} > < \mathrm{S_{1\_}126} > < \mathrm{S_{2\_}73}>$",
"bbox": [540, 215, 872, 229],
"page_idx": 4
},
{
"type": "text",
"text": "Here, \"$\langle \mathrm{S_Q}\rangle$ Artificial Intelligence\" denotes the query that the user is currently searching for.",
"bbox": [513, 244, 911, 273],
"page_idx": 4
},
{
"type": "text",
"text": "3.3.4 Identifier-Language Alignment. To enhance the LLM's understanding of both the collaborative and semantic identifiers of each item, we designed an identifier-language alignment task. 
This task enables the LLM to generate a corresponding description based on an item's identifier and, conversely, to generate the appropriate identifier from the item's description.",
"bbox": [513, 280, 913, 363],
"page_idx": 4
},
{
"type": "text",
"text": "First, we have the Desc2ID task, which enables the LLM to generate the corresponding item identifier based on its description.",
"bbox": [513, 364, 913, 392],
"page_idx": 4
},
{
"type": "text",
"text": "Desc2ID",
"text_level": 1,
"bbox": [540, 402, 594, 414],
"page_idx": 4
},
{
"type": "text",
"text": "Instruction: Using the provided description \"Apple MacBook Air\", predict the corresponding item.",
"bbox": [539, 426, 887, 454],
"page_idx": 4
},
{
"type": "text",
"text": "Response: $< \mathrm{M_{1\_}135}> < \mathrm{M_{2\_}19}> < \mathrm{S_{1\_}41}> < \mathrm{S_{2\_}65}>$",
"bbox": [540, 455, 844, 468],
"page_idx": 4
},
{
"type": "text",
"text": "Then, we have the ID2Desc task, which enables the LLM to generate the corresponding item description based on its identifier.",
"bbox": [513, 484, 913, 513],
"page_idx": 4
},
{
"type": "text",
"text": "ID2Desc",
"text_level": 1,
"bbox": [540, 523, 594, 536],
"page_idx": 4
},
{
"type": "text",
"text": "Instruction: Please provide a description for the item $< \mathrm{M_{1\_}135}> < \mathrm{M_{2\_}19}> < \mathrm{S_{1\_}41}> < \mathrm{S_{2\_}65}>$.",
"bbox": [539, 547, 887, 575],
"page_idx": 4
},
{
"type": "text",
"text": "Response: Apple MacBook Air.",
"bbox": [540, 575, 736, 589],
"page_idx": 4
},
{
"type": "text",
"text": "Please note that for both semantic and collaborative identifiers, we include the Desc2ID and ID2Desc training tasks. Since the input and output of these two tasks do not involve user history, we do not prepend a token indicating the behavior type to the identifier.",
"bbox": [513, 606, 913, 662],
"page_idx": 4
},
{
"type": "text",
"text": "3.4 Training and Inference",
"text_level": 1,
"bbox": [514, 672, 746, 689],
"page_idx": 4
},
{
"type": "text",
"text": "This section introduces how to train the LLM for joint S&R, and how to use the trained LLM during inference to generate the target item for either the search or recommendation task. The training and inference process of GenSAR is shown in Figure 3.",
"bbox": [511, 691, 913, 748],
"page_idx": 4
},
{
"type": "text",
"text": "3.4.1 Training. As previously mentioned, each interaction in the user's history is represented as an identifier, allowing us to formulate the task as a sequence-to-sequence problem. We train the model using next-token prediction, optimizing the negative log-likelihood of generating the target as follows:",
"bbox": [513, 753, 913, 823],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L} = -\\sum_{t=1}^{T} \\log P\\left(y_{t} \\mid y_{<t}, \\text{Ins}\\right). \\tag{12}\n$$\n",
"text_format": "latex",
"bbox": [624, 828, 911, 864],
"page_idx": 4
},
{
"type": "text",
"text": "Here, $y$ represents the behavior-aware identifier of the target to be predicted, as defined in Eq. (11). 
$T$ is the length of the identifier of", + "bbox": [ + 513, + 867, + 913, + 896 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Unified Generative Search and Recommendation", + "bbox": [ + 84, + 75, + 318, + 85 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 630, + 75, + 913, + 87 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/75c269323e7a69b60fb9e7112ba8de4ac044bc997681bbbf5853b88f1e5eda9c.jpg", + "table_caption": [ + "Table 1: Comparison of different generative search or recommendation methods. \"S.\" and \"R.\" denote search and recommendation respectively." + ], + "table_footnote": [], + "table_body": "
MethodsScaleBackboneTaskIdentifier
S.R.SemanticCollaborative
P5 [11, 17]60M/220MT5-small/T5-baseXX
TIGER [26]60MT5-smallXX
LC-Rec [56]7BLLaMAXX
DSI-QG[59]220MT5-baseXX
WebUltron [58]220MT5-baseXX
GenRet [33]220MT5-baseXX
GenSAR (Ours)60MT5-small
", + "bbox": [ + 86, + 152, + 475, + 267 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the target item. Ins refers to the various instructions described in Section 3.3, which are used as inputs for the LLM.", + "bbox": [ + 81, + 276, + 482, + 306 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4.2 Inference. During training, we train the LLM according to the input-output format described in Section 3.3. During inference, to apply the LLM to search and recommendation tasks, we append a behavior token, either “ $\\langle S_I \\rangle$ ” for search or “ $\\langle R_I \\rangle$ ” for recommendation, to the input of the LLM to prompt it to generate the corresponding next item for search or recommendation, respectively. The other tasks mentioned in Section 3.3 are used as auxiliary tasks during training to help the model better understand user S&R behaviors. During generation, to ensure that the items generated by the LLM are within the candidate set, we follow previous works [17, 56] and use constrained beam search.", + "bbox": [ + 81, + 311, + 482, + 464 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.5 Discussion", + "text_level": 1, + "bbox": [ + 83, + 476, + 217, + 489 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Table 1, we compare GenSAR with various generative search or recommendation methods in terms of scale (number of parameters), backbone architecture used, and applicable tasks. GenSAR adopts T5-small as its backbone, resulting in a relatively small number of parameters while being capable of serving both S&R tasks. Compared with existing methods, it achieves an optimal balance between efficiency and effectiveness.", + "bbox": [ + 81, + 494, + 482, + 590 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In terms of novelty, unlike existing methods that focus solely on either semantic or collaborative information in identifier design, our approach incorporates both the semantic information required for search and the collaborative signals essential for recommendation. This joint consideration helps alleviate the trade-off between S&R.", + "bbox": [ + 81, + 592, + 482, + 661 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 83, + 672, + 218, + 688 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conducted experiments to evaluate the performance of GenSAR.", + "bbox": [ + 81, + 691, + 482, + 705 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 83, + 717, + 290, + 734 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.1 Dataset. We conducted experiments on the following datasets: (1) Amazon $^{1}$ [13, 24]: Following previous works [2, 3, 29, 31], we use the semi-synthetic dataset based on Amazon recommendation data as the public dataset for our experiments. $^{2}$ (2) Commercial: To thoroughly evaluate the effectiveness of GenSAR, we collected a dataset from a Chinese commercial app, containing S&R interactions from 10,000 users over two weeks. For details on data", + "bbox": [ + 81, + 736, + 491, + 834 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/2492eabca5dc294d37215f26e8b18bb0004c6adcb54dc3e00c8d92abb5dbddc4.jpg", + "table_caption": [ + "Table 2: Statistics of the datasets used in this paper. \"S\" and \"R\" denote search and recommendation, respectively." + ], + "table_footnote": [], + "table_body": "
Dataset#Users#Items#Queries#Interaction-R#Interaction-S
Amazon192,40362,8839831,266,9031,081,934
Commercial10,000782,225135,2064,286,866383,465
", + "bbox": [ + 519, + 137, + 906, + 185 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "processing and train/validation/test splitting, please see the code link.", + "bbox": [ + 513, + 213, + 911, + 239 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.2 Baselines. In this work, we use the following representative methods as baselines for comparison with GenSAR.", + "bbox": [ + 513, + 253, + 911, + 280 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "First, we compare with the following recommendation models: (1) Sequential Recommendation: GRU4Rec [16]; SASRec [19]; FMLP-Rec [57]; LRURec [45]. (2) Generative Recommendation: P5-CID [11, 17]; TIGER [26]; LC-Rec [56]. Next, we compare with the following search models: (1) Personalized Search: QEM [2]; TEM [6]; CoPPS [7]. (2) Dense Retrieval: E5³ [36]; BGE⁴ [40]. (3) Generative Retrieval: DSI-QG [59]; WebUltron [58]; GenRet [33]. Finally, we compare with the following joint S&R models: JSR [46]; SESRec [31]; UnifiedSSR [41]; UniSAR [29]. For more details on the baselines, please see the code link.", + "bbox": [ + 511, + 281, + 915, + 420 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.3 Evaluation Metrics & Implementation Details. Following previous works [29, 31, 57], we use ranking metrics including top- $k$ Hit Ratio (HR) and top- $k$ Normalized Discounted Cumulative Gain (NDCG). We report the results for $k$ values of $\\{1, 5, 10\\}$ , and since NDCG@1 is the same as HR@1, we do not report it. For more details on the evaluation and model implementation, please see the code link.", + "bbox": [ + 513, + 431, + 915, + 527 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Overall Performance", + "text_level": 1, + "bbox": [ + 514, + 545, + 730, + 559 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3 and Table 4 show the S&R results on two datasets, respectively. From the results, we can observe that:", + "bbox": [ + 513, + 564, + 915, + 590 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Firstly, it can be seen that compared to existing search or recommendation models, GenSAR achieves state-of-the-art results. This validates the effectiveness of GenSAR in alleviating the trade-off between S&R through generative retrieval, by designing joint identifiers and training tasks for both tasks.", + "- Secondly, we can observe that most joint S&R methods (e.g., JSR, UniSAR, GenSAR) outperform traditional methods that using only item IDs, such as sequential recommendation (e.g., SASRec, FMLP-Rec) and personalized search methods (e.g., QEM, TEM, CoPPS). This demonstrates the advantages of jointly modeling of S&R, as it enhances the performance of both tasks.", + "- Thirdly, it can be observed that for search, dense retrieval (e.g., E5, BGE) and generative retrieval (e.g., GenRet, GenSAR) methods that rely on semantic information outperform personalized search models (e.g., QEM, TEM, CoPPS) that rely solely on ID information. This also confirms that for search, semantic information is more important than collaborative information." 
+ ], + "bbox": [ + 513, + 592, + 915, + 827 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 83, + 75, + 367, + 87 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Teng Shi et al.", + "bbox": [ + 841, + 75, + 911, + 87 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "1https://cseweb.ucsd.edu/~jmcauley/datasets/amazon/links.html, https://github.com/QingyaoAi/Amazon-Product-Search-Datasets", + "bbox": [ + 81, + 840, + 482, + 864 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "2Please note that $70\\%$ of the items in the \"Kindle Store\" subset used in previous works [29, 31] lack textual information, so we use the \"Electronics\" subset, where less than $1\\%$ of the items lack text.", + "bbox": [ + 81, + 864, + 480, + 895 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "$^{3}$ https://huggingface.co/intfloat/multilingual-e5-base \n $^{4}$ https://huggingface.co/BAAI/bge-base-en-v1.5, https://huggingface.co/BAAI/bge-base-zh-v1.5", + "bbox": [ + 513, + 861, + 913, + 895 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/76915c600d59c0426bebb18d152b3f3340735ab970484cea05e5b3b7c1a13978.jpg", + "table_caption": [ + "Table 3: The recommendation performance of different methods on the two datasets. The best and the second-best methods are highlighted in bold and underlined fonts, respectively. The improvements over the second-best methods are statistically significant (t-test, $p$ -value $< 0.05$ ). Following commonly used settings [29, 31, 57], we pair the ground-truth item with 99 randomly sampled items that the user has not interacted with to form the candidate list." + ], + "table_footnote": [], + "table_body": "
DatasetsMetricsRecommendationJoint Search and Recommendation
GRU4RecSASRecFMLP-RecLRURecP5-CIDTIGERLC-RecJSRSESRecUnifiedSSRUniSARGenSAR
AmazonHR@10.04400.05440.05340.05440.08810.10730.10630.06570.06270.04770.06800.1261
HR@50.17160.18870.18980.18900.18740.20460.19730.20750.20830.16670.21710.2228
HR@100.28840.29920.30410.30010.27900.28520.27600.31880.32090.27070.33190.3063
NDCG@50.10740.12160.12170.12180.13800.15650.15220.13710.13590.10710.14320.1748
NDCG@100.14490.15710.15840.15750.16740.18240.17740.17290.17210.14050.18020.2015
CommercialHR@10.10220.15190.14420.13630.28430.26300.27030.15760.18900.15150.22140.2997
HR@50.25260.28120.27110.26370.33050.30130.30010.26850.28450.28440.32280.3496
HR@100.35270.37160.35840.35250.38300.34480.33330.35290.36900.38700.40560.4031
NDCG@50.17870.21790.20930.20210.30720.28190.28490.21420.23700.21950.27270.3241
NDCG@100.21100.24700.23730.23060.32400.29580.29550.24130.26410.25240.29930.3411
", + "bbox": [ + 91, + 169, + 901, + 327 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c149eb9030dbb2180ada8f29499c99c7af59ffbadfa06650df7389e5df9e316a.jpg", + "table_caption": [ + "Table 4: The search performance of different methods on the two datasets. Since search relies on semantic relevance, previous works [29, 41] that randomly sample negatives often produce overly easy examples, leading to inflated performance and poor model differentiation. To address this, we follow prior personalized search methods [1, 9] and use BM25 [27] to retrieve 99 harder negatives, forming a candidate list with the positive sample for more accurate evaluation." + ], + "table_footnote": [], + "table_body": "
DatasetsMetricsSearchJoint Search and Recommendation
QEMTEMCoPPSE5BGEDSI-QGWebUltronGenRetJSRUnifiedSSRUniSARGenSAR
AmazonHR@10.15120.08390.09430.32890.40300.35580.34320.41730.08350.07990.11220.5262
HR@50.31010.34710.33800.59450.62640.58480.54640.65130.24070.24760.31290.7529
HR@100.46570.51810.49090.72030.74750.68970.62160.73390.34630.36140.43330.8217
NDCG@50.23110.21730.21540.46620.52190.47640.45070.53990.16230.16620.21430.6485
NDCG@100.28090.27220.26470.50690.56130.51030.47480.56670.19620.20280.25330.6710
CommercialHR@10.03110.03280.02650.12770.12670.10160.08040.11710.02730.01190.05110.1249
HR@50.08700.11060.09980.31080.31840.28310.26190.33200.12020.04700.18100.3655
HR@100.15390.19250.17920.40440.41940.41320.39920.46660.21370.08730.32310.5250
NDCG@50.05860.07150.06260.22300.22580.19400.17210.22730.07280.02920.11440.2472
NDCG@100.07990.09770.08800.25330.25840.23590.21640.27080.10260.04200.15970.2987
", + "bbox": [ + 91, + 395, + 901, + 559 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 Ablation Study", + "text_level": 1, + "bbox": [ + 83, + 566, + 250, + 583 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conducted ablation study on the Commercial dataset to validate the effectiveness of the various training tasks in GenSAR, as shown in Table 5.", + "bbox": [ + 81, + 585, + 482, + 626 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Impact of Behavior Token. As shown in Section 3.2.4, we pretended a token indicating the type of behavior to be an identifier of each user interaction, enabling the LLM to recognize different behavior types. To evaluate its impact, we removed this behavior token, as shown in Table 5 (\"w/o Behavior Token\"). The results indicate that removing the behavior token degrades performance, validating that adding this token helps the LLM better understand the relationship between user S&R behaviors.", + "bbox": [ + 81, + 627, + 482, + 736 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Next Recommendation Item Prediction (NRIP). As shown in Section 3.3.1, we incorporated the training task \"Next Recommendation Item Prediction\" (NRIP), which enables the LLM to predict the next item to recommend based on user history. To evaluate its impact, we removed this task, as shown in Table 5 (\"w/o NRIP\"). The results demonstrate that removing this task significantly degrades recommendation performance and slightly reduces search performance, highlighting the importance of NRIP. Additionally, this demonstrates that recommendation training tasks can enhance search performance, verifying that recommendation can benefit search.", + "bbox": [ + 81, + 738, + 482, + 888 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Next Search Query Prediction (NSQP). We included the training task \"Next Search Query Prediction\" (NSQP) to enable the LLM to better understand user intent by predicting the next query a user might want to search, as described in Section 3.3.2. To evaluate its impact, we observed the results after removing this task, as shown in Table 5 (\"w/o NSQP\"). The results indicate that removing this task significantly degrades search performance and also affects recommendation performance, demonstrating that NSQP helps the model better understand user search intent.", + "bbox": [ + 511, + 568, + 913, + 691 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Next Search Item Prediction (NSIP). In Section 3.3.3, we introduced the training task \"Next Search Item Prediction\" (NSIP), which allows the LLM to predict the next item a user might click based on their history and input query. We analyzed the impact of this task, as shown in Table 5 (\"w/o NSIP\"). The results indicate that removing this task significantly degrades search performance, while also slightly affecting recommendation performance. This demonstrates the importance of NSIP for search and further highlights that search training tasks can enhance recommendation performance, validating that search can assist recommendation.", + "bbox": [ + 511, + 693, + 913, + 830 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Identifier-Language Alignment. In Section 3.3.4, we introduced two tasks, Desc2ID and ID2Desc, for identifier-language alignment, which help the LLM better understand the semantic and collaborative identifiers of each item. 
We observed the impact of", + "bbox": [ + 511, + 830, + 913, + 886 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Unified Generative Search and Recommendation", + "bbox": [ + 84, + 75, + 318, + 85 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 630, + 75, + 913, + 87 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/852f49bc31d0e7f4e1eea1e2c9a018de3829e9f64126eff2bd75879bf40fcc2b.jpg", + "table_caption": [ + "Table 5: Ablation study on the Commercial dataset, where \"w/o\" denotes the removal of the corresponding module in GenSAR." + ], + "table_footnote": [], + "table_body": "
ModelRecommendationSearch
HR@5NDCG@5HR@5NDCG@5
GenSAR0.34960.32410.36550.2472
w/o Behavior Token0.34300.31930.32980.2224
w/o NRIP0.06650.03920.34560.2342
w/o NSQP0.34010.31630.30890.2053
w/o NSIP0.33900.31520.16680.1113
w/o Desc2ID0.34160.31880.33550.2278
w/o ID2Desc0.34580.32200.33980.2308
", + "bbox": [ + 94, + 148, + 468, + 311 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ecce5a0559e48049574371e9b23e1b75329fe824cc739e97fb0a009bfb585c53.jpg", + "image_caption": [ + "(a) Recommendation Performance" + ], + "image_footnote": [], + "bbox": [ + 89, + 313, + 276, + 402 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/397a18df437bd5056f0bc169b0aeea0e25bdb1287fa1e6affaf38d1a3ef93475.jpg", + "image_caption": [ + "(b) Search Performance" + ], + "image_footnote": [], + "bbox": [ + 285, + 314, + 473, + 402 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fb95df98856241a96ab0dff04372bd6259441931f9571d2e0c69523d6587464d.jpg", + "image_caption": [ + "Figure 4: Performance of GenSAR using different identifiers.", + "Figure 5: Collision rate of different identifiers." + ], + "image_footnote": [], + "bbox": [ + 94, + 450, + 468, + 561 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "removing these two tasks, as shown in Table 5 (w/o \"Desc2ID\" and w/o \"ID2Desc\"). It can be seen that removing these tasks leads to a decrease in both S&R performance, indicating the effectiveness of these tasks in helping the LLM better understand item identifiers.", + "bbox": [ + 81, + 594, + 482, + 650 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 Experimental Analysis", + "text_level": 1, + "bbox": [ + 83, + 662, + 313, + 678 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conducted further experiments on the Commercial dataset to analyze the effectiveness of different modules in GenSAR.", + "bbox": [ + 81, + 680, + 480, + 708 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4.1 Impact of Different Identifier. To balance the semantic information needed for search and the collaborative information needed for recommendation, we designed the joint S&R identifier in Section 3.2. To validate its effectiveness, we compared it with identifiers learned directly from semantic embeddings or collaborative embeddings using RQ-VAE [26, 56], as shown in Figure 4. \"Only Collaborative\" represents using only collaborative embeddings, while \"Only Semantic\" represents using only semantic embeddings. The results show that identifiers derived solely from semantic or collaborative information lead to degraded performance. Furthermore, using only collaborative information results in worse search performance, which aligns with the fact that search relies more on semantic information.", + "bbox": [ + 81, + 715, + 482, + 893 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7eaf0981cb5f20384989aa56e36ad232812d82cdb50bb0e90f7e15e1c57b40d9.jpg", + "image_caption": [ + "(a) Recommendation Performance" + ], + "image_footnote": [], + "bbox": [ + 521, + 109, + 707, + 196 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6a62487787c0bd42a8e02f0875370a16e73635f9a6a3a3a06be3b844c3d351b4.jpg", + "image_caption": [ + "(b) Search Performance", + "Figure 6: Performance under different numbers of shared codebooks $L_{m}$ . We fix $L_{m} + L_{n} = 4$ and vary $L_{m}$ to observe the results." + ], + "image_footnote": [], + "bbox": [ + 718, + 109, + 906, + 196 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4.2 Collision Rate of Different Identifier. Additionally, we analyzed the advantages of different identifiers from the perspective of collision rate. 
The formula for calculating the collision rate is as follows:",
"bbox": [511, 289, 913, 339],
"page_idx": 7
},
{
"type": "equation",
"text": "\n$$\n\\text{Collision Rate} = 1 - \\frac{\\#\\,\\text{Unique Identifier}}{\\#\\,\\text{Unique Item}},\n$$\n",
"text_format": "latex",
"bbox": [601, 340, 823, 368],
"page_idx": 7
},
{
"type": "text",
"text": "where # Unique Identifier represents the number of unique identifiers, and # Unique Item represents the number of unique items. Since RQ-VAE does not guarantee a unique identifier for each item during the learning process, collisions may occur where different items share the same identifier [26, 56]. A higher collision rate can negatively impact the model's performance. From Figure 5, it can be observed that the two identifiers assigned to each item in GenSAR, incorporating both semantic and collaborative information, have lower collision rates of $0.18\%$ and $0.39\%$, respectively. In contrast, identifiers derived solely from semantic embeddings or collaborative embeddings exhibit higher collision rates of $1.37\%$ and $0.90\%$, respectively. This further validates the advantage of the identifiers in GenSAR, as their lower collision rate enables the model to achieve better performance.",
"bbox": [511, 373, 913, 568],
"page_idx": 7
},
{
"type": "text",
"text": "4.4.3 Impact of Hyper-parameters. As described in Section 3.2, we have $L_{m}$-level shared codebooks and $L_{n}$-level specific codebooks. Here, we analyze the impact of the number of shared and specific codebooks ($L_{m}$ and $L_{n}$) on the results, as shown in Figure 6. We fix $L_{m} + L_{n} = 4$ and observe the results. It can be seen that having too few ($L_{m} = 1$) or too many ($L_{m} = 3$) shared codebooks fails to achieve strong performance in both S&R. This indicates that $L_{m}$ needs to be properly set so that the identifier can capture both the shared information between semantics and collaboration as well as their specific characteristics. Only in this way can we achieve better performance in both S&R.",
"bbox": [511, 577, 911, 729],
"page_idx": 7
},
{
"type": "text",
"text": "Additionally, we analyzed the impact of identifier length on performance, as shown in Figure 7. We fix $L_{m} = 2$ and vary $L_{n}$ to adjust the identifier length and observe the results. It can be seen that both shorter ($L_{m} + L_{n} = 3$) and longer ($L_{m} + L_{n} = 5$) identifiers lead to performance degradation. This is because, when the identifier is too short, the identifiers learned through RQ-VAE are more prone to collisions, resulting in a higher collision rate and making it difficult for the model to distinguish between different items. On the other hand, when the identifier is too long, the model requires more decoding steps during item generation, leading to accumulated errors and ultimately deteriorating performance. 
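Since collisions are central to both observations above, it is worth noting that the collision-rate measurement of Section 4.4.2 reduces to a few lines. Below is a minimal sketch, assuming identifiers are stored as a mapping from each item to its code tuple (that storage layout is our assumption).

```python
def collision_rate(identifiers: dict) -> float:
    """Collision rate from Section 4.4.2: one minus the ratio of unique
    identifiers to unique items. `identifiers` maps each item to its code
    tuple; this layout is an assumption for illustration."""
    return 1.0 - len(set(identifiers.values())) / len(identifiers)

# Two of three items share a code sequence -> rate = 1 - 2/3 ~ 0.333
print(collision_rate({"a": (1, 2), "b": (1, 2), "c": (3, 4)}))
```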
Therefore, it is essential to properly set the identifier length to achieve better performance.", + "bbox": [ + 511, + 729, + 913, + 896 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 83, + 75, + 364, + 85 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Teng Shi et al.", + "bbox": [ + 841, + 75, + 911, + 85 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/15322b8796533cd47d49b9fc9b63e562b17b827521e8c4717054345e22078795.jpg", + "image_caption": [ + "(a) Recommendation Performance" + ], + "image_footnote": [], + "bbox": [ + 89, + 108, + 277, + 196 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/6c8e992b55df077b506a54aa95974fa63b68fb5fc6708d66c678ddb49d9a9381.jpg", + "image_caption": [ + "(b) Search Performance", + "Figure 7: Performance under different length of the identifier. We fix $L_{m} = 2$ and vary $L_{n}$ to adjust the identifier length." + ], + "image_footnote": [], + "bbox": [ + 285, + 108, + 475, + 196 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 83, + 272, + 207, + 285 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we propose GenSAR, which unifies balanced search and recommendation through generative retrieval to alleviate the trade-off between the two tasks and improve their performance. To balance the semantic information required for search and the collaborative information needed for recommendation, we design the joint S&R identifier and different training tasks. First, we learn two identifiers for each item to represent semantic and collaborative information, respectively. These identifiers share a common part to capture the information shared between semantics and collaboration while retaining distinct parts to preserve specific information. Second, we design different training tasks to help the model better understand the requirements of S&R tasks. We also validate the effectiveness of GenSAR through extensive experiments.", + "bbox": [ + 81, + 290, + 483, + 470 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 84, + 484, + 176, + 498 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Wasi Uddin Ahmad, Kai-Wei Chang, and Hongning Wang. 2018. Multi-task learning for document ranking and query suggestion. In International conference on learning representations.", + "[2] Qingyao Ai, Daniel N Hill, SVN Vishwanathan, and W Bruce Croft. 2019. A zero attention model for personalized product search. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management. 379-388.", + "[3] Qingyao Ai, Yongfeng Zhang, Keping Bi, Xu Chen, and W Bruce Croft. 2017. Learning a hierarchical embedding model for personalized product search. In Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval. 645-654.", + "[4] Jinheon Baek, Nirupama Chandrasekaran, Silviu Cucerzan, Allen Herring, and Sujay Kumar Jauhar. 2024. Knowledge-augmented large language models for personalized contextual query suggestion. In Proceedings of the ACM on Web Conference 2024. 3355-3366.", + "[5] Michele Bevilacqua, Giuseppe Ottaviano, Patrick Lewis, Scott Yih, Sebastian Riedel, and Fabio Petroni. 2022. Autoregressive search engines: Generating substrings as document identifiers. 
Advances in Neural Information Processing Systems 35 (2022), 31668-31683.", + "[6] Keping Bi, Qingyao Ai, and W Bruce Croft. 2020. A transformer-based embedding model for personalized product search. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 1521-1524.", + "[7] Shitong Dai, Jiongnan Liu, Zhicheng Dou, Haonan Wang, Lin Liu, Bo Long, and Ji-Rong Wen. 2023. Contrastive Learning for User Sequence Representation in Personalized Product Search. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, KDD 2023, Long Beach, CA, USA, August 6-10, 2023. ACM, 380-389.", + "[8] Sunhao Dai, Ninglu Shao, Haiyuan Zhao, Weijie Yu, Zihua Si, Chen Xu, Zhongxiang Sun, Xiao Zhang, and Jun Xu. 2023. Uncovering chatgpt's capabilities in recommender systems. In Proceedings of the 17th ACM Conference on Recommender Systems. 1126-1132.", + "[9] Chenlong Deng, Yujia Zhou, and Zhicheng Dou. 2022. Improving personalized search with dual-feedback network. In Proceedings of the fifteenth ACM international conference on web search and data mining, 210-218.", + "[10] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)." + ], + "bbox": [ + 86, + 501, + 482, + 895 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[11] Shijie Geng, Shuchang Liu, Zuohui Fu, Yingqiang Ge, and Yongfeng Zhang. 2022. Recommendation as language processing (rlp): A unified pretrain, personalized prompt & predict paradigm (p5). In Proceedings of the 16th ACM Conference on Recommender Systems. 299-315.", + "[12] Yulong Gu, Wentian Bao, Dan Ou, Xiang Li, Baoliang Cui, Biyu Ma, Haikuan Huang, Qingwen Liu, and Xiaoyi Zeng. 2021. Self-supervised learning on users' spontaneous behaviors for multi-scenario ranking in e-commerce. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 3828-3837.", + "[13] Ruining He and Julian McAuley. 2016. Ups and downs: Modeling the visual evolution of fashion trends with one-class collaborative filtering. In proceedings of the 25th international conference on world wide web. 507-517.", + "[14] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173-182.", + "[15] Zhankui He, Handong Zhao, Zhaowen Wang, Zhe Lin, Ajinkya Kale, and Julian Mcauley. 2022. Query-Aware Sequential Recommendation. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management (Atlanta, GA, USA) (CIKM '22). Association for Computing Machinery, New York, NY, USA, 4019-4023.", + "[16] Balázs Hidasi, Alexandros Karatzoglou, Linas Baltrunas, and Domonkos Tikk. 2016. Session-based Recommendations with Recurrent Neural Networks. In 4th International Conference on Learning Representations, ICLR 2016, San Juan, Puerto Rico, May 2-4, 2016, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.).", + "[17] Wenyue Hua, Shuyuan Xu, Yingqiang Ge, and Yongfeng Zhang. 2023. How to index item ids for recommendation foundation models. 
In Proceedings of the Annual International ACM SIGIR Conference on Research and Development in Information Retrieval in the Asia Pacific Region. 195-204.", + "[18] Gautier Izacard, Mathilde Caron, Lucas Hosseini, Sebastian Riedel, Piotr Bojanowski, Armand Joulin, and Edouard Grave. 2021. Unsupervised dense information retrieval with contrastive learning. arXiv preprint arXiv:2112.09118 (2021).", + "[19] Wang-Cheng Kang and Julian McAuley. 2018. Self-attentive sequential recommendation. In 2018 IEEE International Conference on Data Mining (ICDM). IEEE, 197-206.", + "[20] Doyup Lee, Chihuon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. 2022. Autoregressive image generation using residual quantization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 11523-11532.", + "[21] Xiaoxi Li, Jiajie Jin, Yujiia Zhou, Yuyao Zhang, Peitian Zhang, Yutao Zhu, and Zhicheng Dou. 2024. From matching to generation: A survey on generative information retrieval. arXiv preprint arXiv:2404.14851 (2024).", + "[22] Yongqi Li, Nan Yang, Liang Wang, Furu Wei, and Wenjie Li. 2023. Multiview Identifiers Enhanced Generative Retrieval. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 6636-6648.", + "[23] Jiayi Liao, Sihang Li, Zhengyi Yang, Jiancan Wu, Yancheng Yuan, Xiang Wang, and Xiangnan He. 2024. Llara: Large language-recommendation assistant. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1785-1795.", + "[24] Julian McAuley, Christopher Targett, Qinfeng Shi, and Anton Van Den Hengel. 2015. Image-based recommendations on styles and substitutes. In Proceedings of the 38th international ACM SIGIR conference on research and development in information retrieval. 43-52.", + "[25] Gustavo Penha, Ali Vardasbi, Enrico Palumbo, Marco De Nadai, and Hugues Bouchard. 2024. Bridging Search and Recommendation in Generative Retrieval: Does One Task Help the Other?. In Proceedings of the 18th ACM Conference on Recommender Systems. 340-349.", + "[26] Shashank Rajput, Nikhil Mehta, Anima Singh, Raghunandan Hulikal Keshavan, Trung Vu, Lukasz Heldt, Lichan Hong, Yi Tay, Vinh Tran, Jonah Samost, et al. 2023. Recommender systems with generative retrieval. Advances in Neural Information Processing Systems 36 (2023), 10299-10315.", + "[27] Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: BM25 and beyond. Foundations and Trends® in Information Retrieval 3, 4 (2009), 333-389.", + "[28] Chenglei Shen, Xiao Zhang, Teng Shi, Changshuo Zhang, Guofu Xie, and Jun Xu. 2024. A survey of controllable learning: Methods and applications in information retrieval. arXiv preprint arXiv:2407.06083 (2024).", + "[29] Teng Shi, Zihua Si, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Dewei Leng, Yanan Niu, and Yang Song. 2024. UniSAR: Modeling User Transition Behaviors between Search and Recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1029-1039.", + "[30] Zihua Si, Xueran Han, Xiao Zhang, Jun Xu, Yue Yin, Yang Song, and Ji-Rong Wen. 2022. A Model-Agnostic Causal Learning Framework for Recommendation Using Search Data. In Proceedings of the ACM Web Conference 2022 (Virtual Event, Lyon, France) (WWW '22). Association for Computing Machinery, New York, NY, USA, 224-233." 
+ ], + "bbox": [ + 517, + 108, + 913, + 893 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Unified Generative Search and Recommendation", + "bbox": [ + 84, + 75, + 318, + 85 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 630, + 75, + 913, + 87 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[31] Zihua Si, Zhongxiang Sun, Xiao Zhang, Jun Xu, Xiaoxue Zang, Yang Song, Kun Gai, and Ji-Rong Wen. 2023. When search meets recommendation: Learning disentangled search representation for recommendation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1313-1323.", + "[32] Fei Sun, Jun Liu, Jian Wu, Changhua Pei, Xiao Lin, Wenwu Ou, and Peng Jiang. 2019. BERT4Rec: Sequential Recommendation with Bidirectional Encoder Representations from Transformer. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management (Beijing, China) (CIKM '19). ACM, New York, NY, USA, 1441-1450.", + "[33] Weiwei Sun, Lingyong Yan, Zheng Chen, Shuaiqiang Wang, Haichao Zhu, Pengjie Ren, Zhumin Chen, Dawei Yin, Maarten Rijke, and Zhaochun Ren. 2024. Learning to tokenize for generative retrieval. Advances in Neural Information Processing Systems 36 (2024).", + "[34] Jiakai Tang, Sunhao Dai, Teng Shi, Jun Xu, Xu Chen, Wen Chen, Wu Jian, and Yuning Jiang. 2025. Think Before Recommend: Unleashing the Latent Reasoning Power for Sequential Recommendation. arXiv:2503.22675 [cs.IR] https://arxiv.org/abs/2503.22675", + "[35] Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. 2022. Transformer memory as a differentiable search index. Advances in Neural Information Processing Systems 35 (2022), 21831-21843.", + "[36] Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, and Furu Wei. 2024. Multilingual e5 text embeddings: A technical report. arXiv preprint arXiv:2402.05672 (2024).", + "[37] Yuening Wang, Man Chen, Yaochen Hu, Wei Guo, Yingxue Zhang, Hufeng Guo, Yong Liu, and Mark Coates. 2024. Enhancing Click-through Rate Prediction in Recommendation Domain with Search Query Representation. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 2462-2471.", + "[38] Yujing Wang, Yingyan Hou, Haonan Wang, Ziming Miao, Shibin Wu, Qi Chen, Yuqing Xia, Chengmin Chi, Guoshuai Zhao, Zheng Liu, et al. 2022. A neural corpus indexer for document retrieval. Advances in Neural Information Processing Systems 35 (2022), 25600-25614.", + "[39] Yu Wang, Zhengyang Wang, Hengrui Zhang, Qingyu Yin, Xianfeng Tang, Yinghan Wang, Danqing Zhang, Limeng Cui, Monica Cheng, Bing Yin, et al. 2023. Exploiting intent evolution in e-commercial query recommendation. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 5162-5173.", + "[40] Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2024. C-pack: Packed resources for general chinese embeddings. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 641-649.", + "[41] Jiayi Xie, Shang Liu, Gao Cong, and Zhenzhong Chen. 2024. UnifiedSSR: A Unified Framework of Sequential Search and Recommendation. In Proceedings of the ACM on Web Conference 2024. 
3410-3419.", + "[42] Lee Xiong, Chenyan Xiong, Ye Li, Kwok-Fung Tang, Jialin Liu, Paul Bennett, Junaid Ahmed, and Arnold Overwijk. 2020. Approximate nearest neighbor negative contrastive learning for dense text retrieval. arXiv preprint arXiv:2007.00808 (2020).", + "[43] Jing Yao, Zhicheng Dou, Ruobing Xie, Yanxiong Lu, Zhiping Wang, and Ji-Rong Wen. 2021. USER: A Unified Information Search and Recommendation Model Based on Integrated Behavior Sequence. In Proceedings of the 30th ACM International Conference on Information J& Knowledge Management (Virtual Event, Queensland, Australia) (CIKM '21). Association for Computing Machinery, New York, NY, USA, 2373-2382.", + "[44] Zheng Yuan, Fajie Yuan, Yu Song, Youhua Li, Junchen Fu, Fei Yang, Yunzhu Pan, and Yongxin Ni. 2023. Where to go next for recommender systems? id-vs. modality-based recommender models revisited. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2639-2649.", + "[45] Zhenrui Yue, Yueqi Wang, Zhankui He, Huimin Zeng, Julian McAuley, and Dong Wang. 2024. Linear recurrent units for sequential recommendation. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining. 930-938.", + "[46] Hamed Zamani and W. Bruce Croft. 2018. Joint Modeling and Optimization of Search and Recommendation. In Proceedings of the First Biennial Conference on Design of Experimental Search & Information Retrieval Systems, Bertinoro, Italy, August 28-31, 2018 (CEUR Workshop Proceedings, Vol. 2167). CEUR-WS.org, 36-41.", + "[47] Hamed Zamani and W. Bruce Croft. 2020. Learning a Joint Search and Recommendation Model from User-Item Interactions. In Proceedings of the 13th International Conference on Web Search and Data Mining (Houston, TX, USA) (WSDM '20). Association for Computing Machinery, New York, NY, USA, 717–725.", + "[48] Changshuo Zhang, Teng Shi, Xiao Zhang, Qi Liu, Ruobing Xie, Jun Xu, and Ji-Rong Wen. 2024. Modeling Domain and Feedback Transitions for Cross-Domain Sequential Recommendation. arXiv preprint arXiv:2408.08209 (2024).", + "[49] Changshuo Zhang, Teng Shi, Xiao Zhang, Yanping Zheng, Ruobing Xie, Qi Liu, Jun Xu, and Ji-Rong Wen. 2024. QAGCF: Graph Collaborative Filtering for Q&A" + ], + "bbox": [ + 84, + 108, + 480, + 895 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Recommendation. arXiv preprint arXiv:2406.04828 (2024).", + "[50] Changshuo Zhang, Xiao Zhang, Teng Shi, Jun Xu, and Ji-Rong Wen. 2025. Test-Time Alignment for Tracking User Interest Shifts in Sequential Recommendation. arXiv:2504.01489 [cs.IR] https://arxiv.org/abs/2504.01489", + "[51] Kepu Zhang, Teng Shi, Sunhao Dai, Xiao Zhang, Yinfeng Li, Jing Lu, Xiaoxue Zang, Yang Song, and Jun Xu. 2024. SAQRec: Aligning Recommender Systems to User Satisfaction via Questionnaire Feedback. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3165-3175.", + "[52] Xiao Zhang, Teng Shi, Jun Xu, Zhenhua Dong, and Ji-Rong Wen. 2024. Model-Agnostic Causal Embedding Learning for Counterfactually Group-Fair Recommendation. IEEE Transactions on Knowledge and Data Engineering (2024).", + "[53] Yuting Zhang, Yiqing Wu, Ruidong Han, Ying Sun, Yongchun Zhu, Xiang Li, Wei Lin, Fuzhen Zhuang, Zhulin An, and Yongjun Xu. 2024. Unified Dual-Intent Translation for Joint Modeling of Search and Recommendation. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 
6291-6300.", + "[54] Kai Zhao, Yukun Zheng, Tao Zhuang, Xiang Li, and Xiaoyi Zeng. 2022. Joint Learning of E-Commerce Search and Recommendation with a Unified Graph Neural Network. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining (Virtual Event, AZ, USA) (WSDM '22). Association for Computing Machinery, New York, NY, USA, 1461–1469.", + "[55] Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023).", + "[56] Bowen Zheng, Yupeng Hou, Hongyu Lu, Yu Chen, Wayne Xin Zhao, Ming Chen, and Ji-Rong Wen. 2024. Adapting large language models by integrating collaborative semantics for recommendation. In 2024 IEEE 40th International Conference on Data Engineering (ICDE). IEEE, 1435-1448.", + "[57] Kun Zhou, Hui Yu, Wayne Xin Zhao, and Ji-Rong Wen. 2022. Filter-Enhanced MLP is All You Need for Sequential Recommendation. In Proceedings of the ACM Web Conference 2022 (Virtual Event, Lyon, France) (WWW'22). Association for Computing Machinery, New York, NY, USA, 2388-2399.", + "[58] Yujia Zhou, Jing Yao, Ledell Wu, Zhicheng Dou, and Ji-Rong Wen. 2023. WebUltron: An Ultimate Retriever on Webpages Under the Model-Centric Paradigm. IEEE Transactions on Knowledge and Data Engineering (2023).", + "[59] Shengyao Zhuang, Houxing Ren, Linjun Shou, Jian Pei, Ming Gong, Guido Zuccon, and Daxin Jiang. 2022. Bridging the gap between indexing and retrieval for differentiable search index with query generation. arXiv preprint arXiv:2206.10128 (2022)." + ], + "bbox": [ + 517, + 109, + 913, + 502 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 84, + 75, + 366, + 85 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Teng Shi et al.", + "bbox": [ + 841, + 75, + 911, + 85 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_model.json b/data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_model.json new file mode 100644 index 0000000000000000000000000000000000000000..590422a71b6a2aaf060ed3de8dc69147062c7d68 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_model.json @@ -0,0 +1,3300 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.185, + 0.102, + 0.816, + 0.125 + ], + "angle": 0, + "content": "Unified Generative Search and Recommendation" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.133, + 0.268, + 0.149 + ], + "angle": 0, + "content": "Teng Shi" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.15, + 0.327, + 0.165 + ], + "angle": 0, + "content": "Renmin University of China" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.166, + 0.28, + 0.179 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.181, + 0.3, + 0.195 + ], + "angle": 0, + "content": "shiteng@ruc.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.466, + 0.133, + 0.531, + 0.149 + ], + "angle": 0, + "content": "Jun Xu*" + }, + { + "type": "text", + "bbox": [ + 0.451, + 0.151, + 0.548, + 0.167 + ], + "angle": 0, + "content": "Xiao Zhang" + }, + { + "type": "text", + "bbox": [ + 0.406, + 0.168, + 0.594, + 0.182 + ], + "angle": 0, + "content": "Renmin University of China" + }, + { + "type": "text", + "bbox": [ + 0.451, + 0.183, + 0.549, + 0.197 + ], + "angle": 0, + "content": 
"Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.399, + 0.198, + 0.599, + 0.212 + ], + "angle": 0, + "content": "{junxu,zhangx89}@ruc.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.71, + 0.133, + 0.826, + 0.149 + ], + "angle": 0, + "content": "Xiaoxue Zang" + }, + { + "type": "text", + "bbox": [ + 0.725, + 0.151, + 0.812, + 0.168 + ], + "angle": 0, + "content": "Kai Zheng" + }, + { + "type": "text", + "bbox": [ + 0.666, + 0.168, + 0.872, + 0.182 + ], + "angle": 0, + "content": "Kuaishou Technology Co., Ltd." + }, + { + "type": "text", + "bbox": [ + 0.72, + 0.183, + 0.816, + 0.197 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.707, + 0.198, + 0.829, + 0.211 + ], + "angle": 0, + "content": "xxic666@126.com" + }, + { + "type": "text", + "bbox": [ + 0.693, + 0.213, + 0.842, + 0.227 + ], + "angle": 0, + "content": "zhengk92@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.239, + 0.408, + 0.256 + ], + "angle": 0, + "content": "Yang Song" + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.256, + 0.469, + 0.27 + ], + "angle": 0, + "content": "Kuaishou Technology Co., Ltd." + }, + { + "type": "text", + "bbox": [ + 0.317, + 0.271, + 0.413, + 0.285 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.316, + 0.286, + 0.413, + 0.301 + ], + "angle": 0, + "content": "ys@sonyis.me" + }, + { + "type": "text", + "bbox": [ + 0.592, + 0.239, + 0.673, + 0.256 + ], + "angle": 0, + "content": "Enyun Yu" + }, + { + "type": "text", + "bbox": [ + 0.591, + 0.256, + 0.677, + 0.27 + ], + "angle": 0, + "content": "Independent" + }, + { + "type": "text", + "bbox": [ + 0.585, + 0.271, + 0.682, + 0.285 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.568, + 0.287, + 0.698, + 0.301 + ], + "angle": 0, + "content": "yuenyun@126.com" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.273, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.05730v2 [cs.IR] 10 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.31, + 0.158, + 0.324 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.328, + 0.485, + 0.675 + ], + "angle": 0, + "content": "Modern commercial platforms typically offer both search and recommendation functionalities to serve diverse user needs, making joint modeling of these tasks an appealing direction. While prior work has shown that integrating search and recommendation can be mutually beneficial, it also reveals a performance trade-off: enhancements in one task often come at the expense of the other. This challenge arises from their distinct information requirements: search emphasizes semantic relevance between queries and items, whereas recommendation depends more on collaborative signals among users and items. Effectively addressing this trade-off requires tackling two key problems: (1) integrating both semantic and collaborative signals into item representations, and (2) guiding the model to distinguish and adapt to the unique demands of search and recommendation. The emergence of generative retrieval with Large Language Models (LLMs) presents new possibilities. This paradigm encodes items as identifiers and frames both search and recommendation as sequential generation tasks, offering the flexibility to leverage multiple identifiers and task-specific prompts. In light of this, we introduce GenSAR, a unified generative framework for balanced search and recommendation. 
Our approach designs dual-purpose identifiers and tailored training strategies to incorporate complementary signals and align with task-specific objectives. Experiments on both public and commercial datasets demonstrate that GenSAR effectively reduces the trade-off and achieves state-of-the-art performance on both tasks." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.687, + 0.203, + 0.702 + ], + "angle": 0, + "content": "CCS Concepts" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.705, + 0.484, + 0.732 + ], + "angle": 0, + "content": "- Information systems \\(\\rightarrow\\) Recommender systems; Personalization." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.739, + 0.483, + 0.771 + ], + "angle": 0, + "content": "*Corresponding authors. Work partially done at Engineering Research Center of Next-Generation Intelligent Search and Recommendation, Ministry of Education. Work done when Teng Shi was the intern at Kuaishou." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.78, + 0.482, + 0.853 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.854, + 0.356, + 0.864 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.865, + 0.473, + 0.875 + ], + "angle": 0, + "content": "© 2018 Copyright held by the owner/author(s). Publication rights licensed to ACM." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.875, + 0.265, + 0.884 + ], + "angle": 0, + "content": "ACM ISBN 978-1-4503-XXXX-X/18/06" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.885, + 0.266, + 0.896 + ], + "angle": 0, + "content": "https://doi.org/XXXXXXXXXXXXXXXXXX" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.31, + 0.602, + 0.326 + ], + "angle": 0, + "content": "Keywords" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.329, + 0.816, + 0.343 + ], + "angle": 0, + "content": "Recommendation; Search; Large Language Model" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.357, + 0.662, + 0.368 + ], + "angle": 0, + "content": "ACM Reference Format:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.369, + 0.924, + 0.433 + ], + "angle": 0, + "content": "Teng Shi, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Yang Song, and Enyun Yu. 2018. Unified Generative Search and Recommendation. In Proceedings of Make sure to enter the correct conference title from your rights confirmation emai (Conference acronym 'XX). ACM, New York, NY, USA, 10 pages. https://doi.org/XXXXXXXXX.XXXXXXX" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.463, + 0.651, + 0.477 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.481, + 0.916, + 0.563 + ], + "angle": 0, + "content": "To facilitate the diverse ways of information access, many commercial platforms, such as e-commerce, video, and music platforms, offer both search [2, 3, 6, 7] and recommendation [34, 48-52] (S&R) services. 
This provides an opportunity for joint modeling of S&R, enabling better user interest modeling and enhancing the performance of both tasks." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.564, + 0.916, + 0.73 + ], + "angle": 0, + "content": "Many studies have explored joint modeling of S&R, including: leveraging recommendation to enhance search [2, 3, 6, 7], using search to enhance recommendation [15, 30, 31, 37], and unified S&R modeling [29, 41, 43, 46, 47]. Although these studies have demonstrated that S&R can mutually enhance each other, they have also identified a trade-off when the model serves both tasks simultaneously [29]. Specifically, when the recommendation performance improves, the search performance tends to degrade, and vice versa. Empirical analysis of the representative methods of JSR [46] and UniSAR [29] based on a S&R dataset collected from a real commercial platform also confirmed the performance trade-off, as shown in Figure 1(a). More details please refer to Section 4.1.1." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.73, + 0.916, + 0.897 + ], + "angle": 0, + "content": "Analysis also showed that the trade-off is rooted in the different information requirements of S&R. Search typically focuses more on the semantic relevance between queries and items, with traditional search models often based on pre-trained language models [18, 40, 42]. In contrast, recommendation heavily relies on collaborative information, where ID-based recommendation can yield excellent results [14, 19, 44]. Figure 1(b) shows an empirical validation where the S&R performances with ID- and Text-only embeddings are shown. The ID embeddings are randomly initialized and trained, containing collaborative information, while the Text embeddings are trained with BGE [40] and then reduced to the same dimensionality as that of the ID embeddings, containing semantic" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.367, + 0.088 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.843, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Teng Shi et al." + }, + { + "type": "image", + "bbox": [ + 0.1, + 0.11, + 0.278, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.123, + 0.198, + 0.253, + 0.21 + ], + "angle": 0, + "content": "(a) Trade-off between S&R" + }, + { + "type": "image", + "bbox": [ + 0.286, + 0.11, + 0.468, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.283, + 0.198, + 0.471, + 0.21 + ], + "angle": 0, + "content": "(b) Performance of different embeddings" + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.214, + 0.483, + 0.299 + ], + "angle": 0, + "content": "Figure 1: Empirical analysis on the Commercial dataset: (a) A trade-off between S&R is observed in representative joint S&R methods, JSR [46] and UniSAR [29]. (b) The performance of the sequential recommendation model SASRec [19] and the product search model QEM [2], using ID and text embeddings, respectively." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.315, + 0.483, + 0.356 + ], + "angle": 0, + "content": "information. From Figure 1(b), we found that recommendation relies more on collaborative information while search focuses more on semantic information." 
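As a rough illustration of the probe described above, the sketch below (PyTorch; our own illustration, not the paper's code, with the dimensions and the linear reduction layer as assumptions) contrasts the two embedding variants: trainable randomly initialized ID embeddings versus frozen text embeddings reduced to the same dimensionality:

```python
# Sketch of the two probe embeddings compared in Figure 1(b) (illustrative only;
# shapes and the reduction layer are assumptions, not the paper's exact setup).
import torch
import torch.nn as nn

n_items, d_model, d_text = 10_000, 64, 768

# (1) ID embeddings: randomly initialized and trained with the model, so they
#     can only carry collaborative signal learned from interaction data.
id_emb = nn.Embedding(n_items, d_model)

# (2) Text embeddings: produced by a pre-trained text encoder (e.g., BGE),
#     kept frozen, and reduced to the same dimensionality as the ID embeddings.
text_vectors = torch.randn(n_items, d_text)  # stand-in for encoder outputs
text_emb = nn.Embedding.from_pretrained(text_vectors, freeze=True)
reduce = nn.Linear(d_text, d_model)

items = torch.tensor([0, 42, 7])
print(id_emb(items).shape)            # torch.Size([3, 64])
print(reduce(text_emb(items)).shape)  # torch.Size([3, 64])
```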
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.357, + 0.483, + 0.522 + ], + "angle": 0, + "content": "Therefore, balancing the semantic information required for search and the collaborative information needed for recommendation becomes a key issue in joint S&R modeling. It is non-trivial and faces two major challenges: (1) How to incorporate both semantic and collaborative information in item representations. Existing joint S&R models typically assign a single representation to each item, making it difficult to capture both types of information effectively; (2) How to let the model understand the difference in information requirements of S&R during training. Current joint models often treat S&R tasks identically, without differentiating them during training. This makes it challenging for the model to grasp their distinct requirements." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.522, + 0.483, + 0.661 + ], + "angle": 0, + "content": "Recently, Large Language Model (LLM) [55]-based generative retrieval for search [35, 59] and recommendation [11, 26, 56] have garnered significant attention. This provides a solution to the aforementioned challenges: (1) Generative retrieval assigns an identifier (a sequence of tokens) to each item, allowing us to assign multiple identifiers to each item to balance semantic and collaborative information; (2) Generative retrieval formulates both S&R as sequence-to-sequence (Seq2Seq) tasks, enabling the unification of different S&R tasks and helping the model better understand the distinct requirements of each task." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.661, + 0.483, + 0.897 + ], + "angle": 0, + "content": "Based on this, we propose GenSAR, which unifies balanced search and recommendation through generative retrieval, thereby alleviating the trade-off between S&R to better enhance each other. Firstly, we design a joint S&R identifier that integrates both semantic and collaborative information. Building on the RQ-VAE [26, 56] method, we employ shared codebooks for both semantic and collaborative information, alongside specific codebooks for each. As a result, items from search are represented by semantic codes, while items from recommendation are represented by collaborative codes. These two codes share a common portion to capture shared information while also retaining distinct parts to preserve the unique characteristics of semantic and collaborative information. Secondly, we design the joint S&R training tasks. We prepend a token representing the behavior type to the item identifier and then input the user's S&R history into the LLM (with the user query also provided for search). Different prompts are used to guide LLMs to predict the next recommended item, the next searched query," + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.107, + 0.913, + 0.135 + ], + "angle": 0, + "content": "and the next searched item, enabling the model to understand the distinct requirements for S&R." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.135, + 0.915, + 0.203 + ], + "angle": 0, + "content": "The major contributions of the paper are summarized as follows: We verified the existence of the trade-off between S&R, and identified that this trade-off arises from the different information requirements of S&R. Additionally, we have analyzed the challenges in balancing semantic and collaborative information needed for S&R." 
+ }, + { + "type": "text", + "bbox": [ + 0.514, + 0.204, + 0.915, + 0.272 + ], + "angle": 0, + "content": "- We propose GenSAR, which unifies balanced S&R through generative retrieval. We designed a joint S&R identifier to balance semantic and collaborative information, and developed joint training tasks to help the model understand the different requirements of each task." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.273, + 0.913, + 0.315 + ], + "angle": 0, + "content": "- Experimental results on two datasets validate the effectiveness of GenSAR. GenSAR not only surpasses traditional S&R models but also outperforms generative S&R models." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.327, + 0.659, + 0.341 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.349, + 0.915, + 0.529 + ], + "angle": 0, + "content": "Joint Search and Recommendation. Joint modeling of S&R has attracted increasing attention in recent years and can be broadly categorized into three types: (1) Enhancing search with recommendation [2, 3, 6, 7], such as TEM [6], which uses Transformers to model user preferences, and CoPPS [7], which applies contrastive learning to address data sparsity. (2) Enhancing recommendation with search [15, 30, 31, 37], e.g., SESRec [31], which disentangles similar and dissimilar interests from both histories. (3) Unified modeling of S&R [29, 41, 43, 46, 47, 53, 54], such as JSR [46, 47] with joint loss and UniSAR [29], which models behavior transitions. While these works show mutual benefits between S&R, they also reveal a trade-off [28, 29]. This paper addresses that trade-off within a generative retrieval framework." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.533, + 0.915, + 0.755 + ], + "angle": 0, + "content": "Generative Search and Recommendation. With the rise of Large Language Models (LLMs) [55], LLM-based generative retrieval has been widely explored for both search [5, 21, 33, 35, 38, 58, 59] and recommendation [11, 17, 25, 26, 56]. These methods represent items as identifiers and input the user query (for search) or user history (for recommendation) into the LLM to generate the target item. Identifier designs can be grouped into: (1) Text-based, using item titles [8, 23] or substrings [5, 22]; (2) Non-learnable ID-based, with early methods assigning random IDs [11], and later ones using clustering to encode semantic or collaborative structure [17, 35, 38]; (3) Learnable codebook-based, applying techniques like RQ-VAE [26, 56] to learn identifiers from semantic or collaborative embeddings. However, most existing approaches design identifiers tailored to either search or recommendation, focusing solely on semantic or collaborative information. In joint S&R, balancing both is essential for strong performance across tasks." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.767, + 0.664, + 0.783 + ], + "angle": 0, + "content": "3 Our Approach" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.786, + 0.915, + 0.895 + ], + "angle": 0, + "content": "This section introduces our proposed method, GenSAR. Section 3.1 defines the Joint Search and Recommendation task. Section 3.2 presents the Joint Identifier module, where we design separate semantic and collaborative identifiers to balance the different needs of search and recommendation. Section 3.3 describes task-specific training objectives to help the model capture both types of information. Finally, Section 3.4 details the training and inference process of GenSAR." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.319, + 0.087 + ], + "angle": 0, + "content": "Unified Generative Search and Recommendation" + }, + { + "type": "header", + "bbox": [ + 0.631, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.106, + 0.891, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.379, + 0.916, + 0.435 + ], + "angle": 0, + "content": "Figure 2: The joint search and recommendation identifier. We extract the semantic and collaborative embeddings for each item. These two embeddings are first concatenated and passed through the shared codebooks to learn shared codes. Then, the semantic and collaborative embeddings are separately processed through specific codebooks to learn specific codes. Finally, these codes are concatenated to form two identifiers for each item: one for semantics and one for collaboration." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.441, + 0.305, + 0.455 + ], + "angle": 0, + "content": "3.1 Problem Formulation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.459, + 0.483, + 0.581 + ], + "angle": 0, + "content": "Let \\(\\mathcal{U},\\mathcal{V},Q\\) denote the sets of users, items, and queries, respectively. Each user \\(u\\in \\mathcal{U}\\) has a chronologically ordered interaction history \\(S_{u} = [(b_{1},x_{1}),(b_{2},x_{2}),\\ldots ,(b_{N},x_{N})]\\) that includes her historical S&R behaviors, where \\(N\\) denotes the number of \\(u\\) 's historical behaviors. \\(b_{i}\\in \\{\\langle \\mathrm{R}_{\\mathrm{I}}\\rangle ,\\langle \\mathrm{S}_{\\mathrm{Q}}\\rangle ,\\langle \\mathrm{S}_{\\mathrm{I}}\\rangle \\}\\) represents the type of the \\(i\\) -th behavior: \\(\\langle \\mathrm{R_I}\\rangle\\) indicates an item clicked by the user after a recommendation, \\(\\langle \\mathrm{S_Q}\\rangle\\) represents a query searched by the user, and \\(\\langle \\mathrm{S_I}\\rangle\\) denotes an item clicked by the user after searching a query. \\(x_{i}\\) denotes the \\(i\\) -th behavior:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.582, + 0.482, + 0.618 + ], + "angle": 0, + "content": "\\[\nx _ {i} = \\left\\{ \\begin{array}{l l} v _ {i}, & \\text {i f} b _ {i} = \\langle \\mathrm {R} _ {\\mathrm {I}} \\rangle \\text {o r} b _ {i} = \\langle \\mathrm {S} _ {\\mathrm {I}} \\rangle , \\\\ q _ {i}, & \\text {i f} b _ {i} = \\langle \\mathrm {S} _ {\\mathrm {Q}} \\rangle , \\end{array} \\right. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.62, + 0.483, + 0.678 + ], + "angle": 0, + "content": "where \\( v_{i} \\in \\mathcal{V} \\) denotes the \\( i \\)-th interacted item and \\( q_{i} \\in Q \\) is the \\( i \\)-th searched query. Our goal is to enable the model to understand user interests and predict the next item \\( v_{N+1} \\) for search when \\( b_{N+1} = \\langle \\mathrm{S}_{\\mathrm{I}} \\rangle \\) or recommendation when \\( b_{N+1} = \\langle \\mathrm{R}_{\\mathrm{I}} \\rangle \\)." + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.688, + 0.483, + 0.705 + ], + "angle": 0, + "content": "3.2 Joint Search and Recommendation Identifier" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.707, + 0.483, + 0.805 + ], + "angle": 0, + "content": "This section introduces the design of the joint S&R identifier (Figure 2). We first extract semantic and collaborative embeddings for each item. 
Using RQ-VAE [20, 26, 56], we apply both shared and separate codebooks to learn two identifiers per item—one semantic, one collaborative. The identifiers share common parts to capture shared information, while retaining unique parts to reflect task-specific features." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.812, + 0.483, + 0.897 + ], + "angle": 0, + "content": "3.2.1 Embedding Extraction. For each item \\( v \\in \\mathcal{V} \\), we can input its textual information, such as the title and description, into a pre-trained retrieval model (e.g., BERT [10], BGE [40]) to obtain an embedding \\( \\mathbf{v}_s \\in \\mathbb{R}^{d_s} \\) that contains its semantic information. Meanwhile, we can also obtain an embedding \\( \\mathbf{v}_c \\in \\mathbb{R}^{d_c} \\) containing its collaborative information from a pre-trained recommendation" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.441, + 0.916, + 0.498 + ], + "angle": 0, + "content": "model (e.g., SASRec [19], BERT4Rec [32]). \\(d_{s}\\) and \\(d_{c}\\) represent the dimensions of the semantic and collaborative embeddings, respectively. We map the semantic and collaborative embeddings to the same-dimensional latent space using two encoders:" + }, + { + "type": "equation", + "bbox": [ + 0.588, + 0.522, + 0.915, + 0.537 + ], + "angle": 0, + "content": "\\[\n\\mathbf {z} _ {s} = \\operatorname {E n c o d e r} _ {s} (\\mathbf {v} _ {s}), \\quad \\mathbf {z} _ {c} = \\operatorname {E n c o d e r} _ {c} (\\mathbf {v} _ {c}), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.56, + 0.916, + 0.603 + ], + "angle": 0, + "content": "where \\( \\mathbf{z}_s \\in \\mathbb{R}^d, \\mathbf{z}_c \\in \\mathbb{R}^d \\) and \\( d \\) is the dimension of the latent embeddings, \\( \\mathrm{Encoder}_s(\\cdot) \\) and \\( \\mathrm{Encoder}_c(\\cdot) \\) are two MLPs (Multilayer Perceptrons)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.62, + 0.916, + 0.834 + ], + "angle": 0, + "content": "3.2.2 Residual Quantization. To integrate both semantic and collaborative information, we use \\( L_{m} \\)-level shared codebooks, along with \\( L_{n} \\)-level specific codebooks for semantic and collaborative information, respectively. First, the latent embeddings for semantic and collaborative information, \\( \\mathbf{z}_s \\) and \\( \\mathbf{z}_c \\), are concatenated to form \\( \\mathbf{r}_0^m = [\\mathbf{z}_s; \\mathbf{z}_c] \\in \\mathbb{R}^{2d} \\). This \\( \\mathbf{r}_0^m \\) is then passed through the \\( L_{m} \\)-level shared codebooks to obtain the shared codes \\( I_m \\) and the residual embedding \\( \\mathbf{r}_{L_m}^m \\). Then, we extract the semantic part \\( \\mathbf{r}_0^s = \\mathbf{r}_{L_m}^m [1:d] \\in \\mathbb{R}^d \\) and the collaborative part \\( \\mathbf{r}_0^c = \\mathbf{r}_{L_m}^m [d:2d] \\in \\mathbb{R}^d \\) from \\( \\mathbf{r}_{L_m}^m \\), and input them separately into the semantic and collaborative codebooks to learn their specific codes \\( I_s \\) and \\( I_c \\), respectively. Finally, the shared and specific codes are concatenated, resulting in two identifiers, \\( I_{m+s} \\) and \\( I_{m+c} \\), for each item. Next, we will introduce the residual quantization process for both the shared and specific codebooks." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.838, + 0.915, + 0.897 + ], + "angle": 0, + "content": "- Shared Codebooks. We have \\( L_{m} \\)-level shared codebooks. 
At each level \\( i \\in \\{1, 2, \\dots, L_{m}\\} \\), we have a shared codebook \\( C_{i}^{m} = \\{\\mathbf{e}_{k}\\}_{k=1}^{K} \\) where \\( K \\) is the size of each codebook and \\( \\mathbf{e}_{k} \\in \\mathbb{R}^{2d} \\) is a learnable code embedding. The residual quantization process for the shared" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.368, + 0.088 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.843, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Teng Shi et al." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.107, + 0.232, + 0.12 + ], + "angle": 0, + "content": "codebooks is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.161, + 0.123, + 0.402, + 0.149 + ], + "angle": 0, + "content": "\\[\nc _ {i} ^ {m} = \\underset {k} {\\arg \\min} | | \\mathbf {r} _ {i - 1} ^ {m} - \\mathbf {e} _ {k} | | _ {2} ^ {2}, \\quad \\mathbf {e} _ {k} \\in C _ {i} ^ {m},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.164, + 0.15, + 0.403, + 0.169 + ], + "angle": 0, + "content": "\\[\n\\mathbf {r} _ {i} ^ {m} = \\mathbf {r} _ {i - 1} ^ {m} - \\mathbf {e} _ {c _ {i} ^ {m}}, \\quad \\mathbf {r} _ {0} ^ {m} = [ \\mathbf {z} _ {s}; \\mathbf {z} _ {c} ] \\in \\mathbb {R} ^ {2 d},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.172, + 0.483, + 0.235 + ], + "angle": 0, + "content": "where \\( c_{i}^{m} \\) is the assigned code from the \\( i \\)-th level of the shared codebook. \\( \\mathbf{r}_{i-1}^{m} \\) is the residual from last level. Through the recursive quantization in Eq. (3), we can obtain the shared codes \\( I_{m} = \\left[c_{1}^{m}, c_{2}^{m}, \\ldots, c_{L_{m}}^{m}\\right] \\) and the residual embedding \\( \\mathbf{r}_{L_{m}}^{m} \\)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.238, + 0.484, + 0.379 + ], + "angle": 0, + "content": "- Specific Codebooks. We can extract the semantic part \\(\\mathbf{r}_0^s = \\mathbf{r}_{L_m}^m [1:d] \\in \\mathbb{R}^d\\) and the collaborative part \\(\\mathbf{r}_0^c = \\mathbf{r}_{L_m}^m [d:2d] \\in \\mathbb{R}^d\\) from the residual embedding \\(\\mathbf{r}_{L_m}^m\\) outputted by the shared codebooks. We then pass them separately through the \\(L_n\\)-level semantic and collaborative specific codebooks \\(C_i^s\\) and \\(C_i^c\\), where \\(i \\in \\{1, 2, \\dots, L_n\\}\\). Please note that, unlike the shared codebook whose code embeddings are \\(2d\\)-dimensional, the code embeddings of the specific codebooks are \\(d\\)-dimensional. 
The residual quantization process for the specific codebooks can be formulated as follows:" },
{ "type": "equation", "bbox": [ 0.167, 0.378, 0.397, 0.402 ], "angle": 0, "content": "\\[\nc _ {i} ^ {s} = \\underset {k} {\\arg \\min } \\left\\| \\mathbf {r} _ {i - 1} ^ {s} - \\mathbf {e} _ {k} \\right\\| _ {2} ^ {2}, \\quad \\mathbf {e} _ {k} \\in C _ {i} ^ {s},\n\\]" },
{ "type": "equation", "bbox": [ 0.168, 0.404, 0.482, 0.43 ], "angle": 0, "content": "\\[\nc _ {i} ^ {c} = \\underset {k} {\\arg \\min } \\left\\| \\mathbf {r} _ {i - 1} ^ {c} - \\mathbf {e} _ {k} \\right\\| _ {2} ^ {2}, \\quad \\mathbf {e} _ {k} \\in C _ {i} ^ {c}, \\tag {4}\n\\]" },
{ "type": "equation", "bbox": [ 0.168, 0.432, 0.372, 0.449 ], "angle": 0, "content": "\\[\n\\mathbf {r} _ {i} ^ {s} = \\mathbf {r} _ {i - 1} ^ {s} - \\mathbf {e} _ {c _ {i} ^ {s}}, \\quad \\mathbf {r} _ {i} ^ {c} = \\mathbf {r} _ {i - 1} ^ {c} - \\mathbf {e} _ {c _ {i} ^ {c}},\n\\]" },
{ "type": "text", "bbox": [ 0.082, 0.45, 0.483, 0.505 ], "angle": 0, "content": "where \\( c_i^s \\) and \\( c_i^c \\) represent the codes assigned by the \\( i \\)-th level semantic-specific and collaborative-specific codebooks, respectively. Through the recursive quantization in Eq. (4), we can obtain the semantic-specific and collaborative-specific codes as follows:" },
{ "type": "equation", "bbox": [ 0.145, 0.509, 0.419, 0.533 ], "angle": 0, "content": "\\[\nI _ {s} = \\left[ c _ {1} ^ {s}, c _ {2} ^ {s}, \\dots , c _ {L _ {n}} ^ {s} \\right], \\quad I _ {c} = \\left[ c _ {1} ^ {c}, c _ {2} ^ {c}, \\dots , c _ {L _ {n}} ^ {c} \\right].\n\\]" },
{ "type": "text", "bbox": [ 0.082, 0.536, 0.483, 0.577 ], "angle": 0, "content": "Finally, by concatenating the shared codes and the specific codes, we can obtain the semantic identifier \\( I_{m + s} \\) and collaborative identifier \\( I_{m + c} \\) for item \\( v \\):" },
{ "type": "equation", "bbox": [ 0.16, 0.579, 0.404, 0.603 ], "angle": 0, "content": "\\[\nI _ {m + s} = \\left[ c _ {1} ^ {m}, c _ {2} ^ {m}, \\dots , c _ {L _ {m}} ^ {m}, c _ {1} ^ {s}, c _ {2} ^ {s}, \\dots , c _ {L _ {n}} ^ {s} \\right],\n\\]" },
{ "type": "equation", "bbox": [ 0.162, 0.604, 0.482, 0.629 ], "angle": 0, "content": "\\[\nI _ {m + c} = \\left[ c _ {1} ^ {m}, c _ {2} ^ {m}, \\dots , c _ {L _ {m}} ^ {m}, c _ {1} ^ {c}, c _ {2} ^ {c}, \\dots , c _ {L _ {n}} ^ {c} \\right]. \\tag {5}\n\\]" },
{ "type": "text", "bbox": [ 0.083, 0.634, 0.483, 0.675 ], "angle": 0, "content": "3.2.3 Identifier Training. 
After passing through the shared and specific codebooks, we can obtain the semantic and collaborative quantized embeddings as follows:" },
{ "type": "equation", "bbox": [ 0.096, 0.679, 0.483, 0.717 ], "angle": 0, "content": "\\[\n\\hat {\\mathbf {z}} _ {s} = \\sum_ {i = 1} ^ {L _ {m}} \\mathbf {e} _ {c _ {i} ^ {m}} [ 1: d ] + \\sum_ {i = 1} ^ {L _ {n}} \\mathbf {e} _ {c _ {i} ^ {s}}, \\quad \\hat {\\mathbf {z}} _ {c} = \\sum_ {i = 1} ^ {L _ {m}} \\mathbf {e} _ {c _ {i} ^ {m}} [ d: 2 d ] + \\sum_ {i = 1} ^ {L _ {n}} \\mathbf {e} _ {c _ {i} ^ {c}}, \\tag {6}\n\\]" },
{ "type": "text", "bbox": [ 0.082, 0.722, 0.483, 0.811 ], "angle": 0, "content": "where \\(\\mathbf{e}_{c_i^m} \\in \\mathbb{R}^{2d}\\) is the code embedding of the shared codebooks, \\(\\mathbf{e}_{c_i^s} \\in \\mathbb{R}^d\\) and \\(\\mathbf{e}_{c_i^c} \\in \\mathbb{R}^d\\) are the code embeddings of the semantic and collaborative specific codebooks. The quantized semantic embedding \\(\\hat{\\mathbf{z}}_s \\in \\mathbb{R}^d\\) and collaborative embedding \\(\\hat{\\mathbf{z}}_c \\in \\mathbb{R}^d\\) will be used to reconstruct the original semantic and collaborative embeddings, \\(\\mathbf{v}_s\\) and \\(\\mathbf{v}_c\\):" },
{ "type": "equation", "bbox": [ 0.157, 0.816, 0.483, 0.831 ], "angle": 0, "content": "\\[\n\\hat {\\mathbf {v}} _ {s} = \\operatorname {Decoder} _ {s} (\\hat {\\mathbf {z}} _ {s}), \\quad \\hat {\\mathbf {v}} _ {c} = \\operatorname {Decoder} _ {c} (\\hat {\\mathbf {z}} _ {c}), \\tag {7}\n\\]" },
{ "type": "text", "bbox": [ 0.082, 0.836, 0.483, 0.876 ], "angle": 0, "content": "where \\(\\mathrm{Decoder}_s(\\cdot)\\) and \\(\\mathrm{Decoder}_c(\\cdot)\\) are two MLPs. We can compute the reconstruction loss used for training the encoder and decoder as follows:" },
{ "type": "equation", "bbox": [ 0.175, 0.881, 0.483, 0.898 ], "angle": 0, "content": "\\[\n\\mathcal {L} _ {\\text {Recon}} = \\left\\| \\mathbf {v} _ {s} - \\hat {\\mathbf {v}} _ {s} \\right\\| _ {2} ^ {2} + \\left\\| \\mathbf {v} _ {c} - \\hat {\\mathbf {v}} _ {c} \\right\\| _ {2} ^ {2}. \\tag {8}\n\\]" },
{ "type": "image", "bbox": [ 0.527, 0.105, 0.9, 0.237 ], "angle": 0, "content": null },
{ "type": "image", "bbox": [ 0.528, 0.248, 0.9, 0.336 ], "angle": 0, "content": null },
{ "type": "image_caption", "bbox": [ 0.514, 0.341, 0.915, 0.424 ], "angle": 0, "content": "Figure 3: Training and Inference Process of GenSAR. During training, we provide the LLM with different instructions to generate corresponding responses. During inference, we append a token at the end of the instruction to indicate the type of behavior to be predicted, enabling the LLM to be applied to either search or recommendation tasks."
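To make the quantization in Eqs. (3)-(6) concrete, here is a minimal NumPy sketch written from the equations above rather than from a released implementation; the sizes, random codebooks, and variable names are illustrative assumptions, and real RQ-VAE codebooks are learned jointly with the encoders and decoders:

```python
# Sketch of shared + specific residual quantization (our illustration of
# Eqs. (3)-(5); random codebooks stand in for learned ones).
import numpy as np

rng = np.random.default_rng(0)
d, K, L_m, L_n = 8, 32, 2, 2   # latent dim, codebook size, shared / specific levels

def quantize(r, codebooks):
    """Greedy residual quantization: at each level keep the nearest code."""
    codes = []
    for C in codebooks:                                   # C: (K, dim) code matrix
        k = int(np.argmin(((r - C) ** 2).sum(axis=-1)))   # nearest code embedding
        codes.append(k)
        r = r - C[k]                                      # residual for next level
    return codes, r

shared   = [rng.normal(size=(K, 2 * d)) for _ in range(L_m)]  # 2d-dim shared books
sem_spec = [rng.normal(size=(K, d)) for _ in range(L_n)]      # d-dim semantic books
col_spec = [rng.normal(size=(K, d)) for _ in range(L_n)]      # d-dim collaborative books

z_s, z_c = rng.normal(size=d), rng.normal(size=d)       # an item's two embeddings
I_m, r = quantize(np.concatenate([z_s, z_c]), shared)   # shared codes over [z_s; z_c]
I_s, _ = quantize(r[:d], sem_spec)                      # semantic-specific codes
I_c, _ = quantize(r[d:], col_spec)                      # collaborative-specific codes

print("semantic identifier I_{m+s}:     ", I_m + I_s)
print("collaborative identifier I_{m+c}:", I_m + I_c)
```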
+ },
{ "type": "text", "bbox": [ 0.515, 0.434, 0.913, 0.449 ], "angle": 0, "content": "We can also compute the loss for residual quantization as follows:" },
{ "type": "equation", "bbox": [ 0.554, 0.454, 0.875, 0.492 ], "angle": 0, "content": "\\[\n\\mathcal {L} _ {\\mathrm {RQ}} ^ {m} = \\sum_ {i = 1} ^ {L _ {m}} \\| \\mathrm {sg} [ \\mathbf {r} _ {i - 1} ^ {m} ] - \\mathbf {e} _ {c _ {i} ^ {m}} \\| _ {2} ^ {2} + \\alpha \\| \\mathbf {r} _ {i - 1} ^ {m} - \\mathrm {sg} [ \\mathbf {e} _ {c _ {i} ^ {m}} ] \\| _ {2} ^ {2},\n\\]" },
{ "type": "equation", "bbox": [ 0.555, 0.495, 0.913, 0.533 ], "angle": 0, "content": "\\[\n\\mathcal {L} _ {\\mathrm {RQ}} ^ {s} = \\sum_ {i = 1} ^ {L _ {n}} \\| \\mathrm {sg} [ \\mathbf {r} _ {i - 1} ^ {s} ] - \\mathbf {e} _ {c _ {i} ^ {s}} \\| _ {2} ^ {2} + \\alpha \\| \\mathbf {r} _ {i - 1} ^ {s} - \\mathrm {sg} [ \\mathbf {e} _ {c _ {i} ^ {s}} ] \\| _ {2} ^ {2}, \\tag {9}\n\\]" },
{ "type": "equation", "bbox": [ 0.555, 0.536, 0.866, 0.573 ], "angle": 0, "content": "\\[\n\\mathcal {L} _ {\\mathrm {RQ}} ^ {c} = \\sum_ {i = 1} ^ {L _ {n}} \\| \\mathrm {sg} [ \\mathbf {r} _ {i - 1} ^ {c} ] - \\mathbf {e} _ {c _ {i} ^ {c}} \\| _ {2} ^ {2} + \\alpha \\| \\mathbf {r} _ {i - 1} ^ {c} - \\mathrm {sg} [ \\mathbf {e} _ {c _ {i} ^ {c}} ] \\| _ {2} ^ {2},\n\\]" },
{ "type": "equation", "bbox": [ 0.556, 0.575, 0.72, 0.593 ], "angle": 0, "content": "\\[\n\\mathcal {L} _ {\\mathrm {RQ}} = \\mathcal {L} _ {\\mathrm {RQ}} ^ {m} + \\mathcal {L} _ {\\mathrm {RQ}} ^ {s} + \\mathcal {L} _ {\\mathrm {RQ}} ^ {c},\n\\]" },
{ "type": "text", "bbox": [ 0.513, 0.599, 0.915, 0.653 ], "angle": 0, "content": "where \\(\\mathrm{sg}[\\cdot ]\\) denotes the stop-gradient operation and \\(\\alpha\\) is a hyperparameter. \\(\\mathcal{L}_{\\mathrm{RQ}}\\) is used to train the code embeddings in both the shared and specific codebooks. Finally, the total loss for training the identifier is as follows:" },
{ "type": "equation", "bbox": [ 0.633, 0.662, 0.913, 0.677 ], "angle": 0, "content": "\\[\n\\mathcal {L} _ {\\mathrm {RQ-VAE}} = \\mathcal {L} _ {\\mathrm {Recon}} + \\mathcal {L} _ {\\mathrm {RQ}}. \\tag {10}\n\\]" },
{ "type": "text", "bbox": [ 0.514, 0.684, 0.915, 0.797 ], "angle": 0, "content": "3.2.4 Behavior-aware Identifier. After learning the semantic and collaborative identifiers for each item, we can represent each user interaction \\((b_{i},x_{i})\\) as shown in Eq. (1). To help the model understand different behaviors in the user's interaction history, we prepend a token indicating the behavior type to each interaction's identifier. For interactions involving items, we prepend the corresponding behavior token to the identifier of each item. For interactions involving queries, we prepend the behavior token to the word sequence of the query. 
It can be formulated as follows:" },
{ "type": "equation", "bbox": [ 0.524, 0.803, 0.913, 0.868 ], "angle": 0, "content": "\\[\n\\operatorname {ID} \\left(b _ {i}, x _ {i}\\right) = \\left\\{ \\begin{array}{l l} {\\left[ \\langle \\mathrm {R} _ {\\mathrm {I}} \\rangle , c _ {1} ^ {m}, c _ {2} ^ {m}, \\dots , c _ {L _ {m}} ^ {m}, c _ {1} ^ {c}, c _ {2} ^ {c}, \\dots , c _ {L _ {n}} ^ {c} \\right],} & \\text {if } b _ {i} = \\langle \\mathrm {R} _ {\\mathrm {I}} \\rangle , \\\\ {\\left[ \\langle \\mathrm {S} _ {\\mathrm {Q}} \\rangle , w _ {1}, w _ {2}, \\dots , w _ {| q _ {i} |} \\right],} & \\text {if } b _ {i} = \\langle \\mathrm {S} _ {\\mathrm {Q}} \\rangle , \\\\ {\\left[ \\langle \\mathrm {S} _ {\\mathrm {I}} \\rangle , c _ {1} ^ {m}, c _ {2} ^ {m}, \\dots , c _ {L _ {m}} ^ {m}, c _ {1} ^ {s}, c _ {2} ^ {s}, \\dots , c _ {L _ {n}} ^ {s} \\right],} & \\text {if } b _ {i} = \\langle \\mathrm {S} _ {\\mathrm {I}} \\rangle , \\end{array} \\right. \\tag {11}\n\\]" },
{ "type": "text", "bbox": [ 0.515, 0.868, 0.913, 0.897 ], "angle": 0, "content": "where \\(\\left[w_{1},w_{2},\\dots ,w_{|q_{i}|}\\right]\\) are the words of query \\(q_{i}\\). \\(\\mathrm{ID}(\\cdot)\\) denotes the function for obtaining the identifier of each interaction." } ], [
{ "type": "header", "bbox": [ 0.085, 0.076, 0.319, 0.087 ], "angle": 0, "content": "Unified Generative Search and Recommendation" },
{ "type": "header", "bbox": [ 0.631, 0.076, 0.914, 0.088 ], "angle": 0, "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" },
{ "type": "title", "bbox": [ 0.084, 0.106, 0.482, 0.122 ], "angle": 0, "content": "3.3 Joint Search and Recommendation Training" },
{ "type": "text", "bbox": [ 0.082, 0.125, 0.483, 0.166 ], "angle": 0, "content": "To better adapt the LLM to joint S&R tasks, we design training objectives that help it understand user behaviors and effectively learn both semantic and collaborative identifiers." },
{ "type": "text", "bbox": [ 0.082, 0.186, 0.484, 0.298 ], "angle": 0, "content": "3.3.1 Next Recommendation Item Prediction. To enable the LLM to perform well on the recommendation task, we let it predict the next recommended item. Unlike previous generative recommendation models [11, 26, 56] that only use the user's recommendation history, our approach incorporates search history as well. This allows the LLM to better leverage the user's historical information and understand the relationship between S&R behaviors. A sample of the data is shown below:" },
{ "type": "title", "bbox": [ 0.11, 0.32, 0.348, 0.333 ], "angle": 0, "content": "Next Recommendation Item Prediction" },
{ "type": "text", "bbox": [ 0.109, 0.344, 0.457, 0.359 ], "angle": 0, "content": "Instruction: Below is the user's interaction history: \\(\\langle S_Q\\rangle\\)" },
{ "type": "text", "bbox": [ 0.11, 0.359, 0.457, 0.372 ], "angle": 0, "content": "Piano; \\(\\langle S_I\\rangle < \\mathrm{S_{1\\_}184} > < \\mathrm{M_{1\\_}247} > < \\mathrm{M_{2\\_}197} > < \\mathrm{S_{2\\_}110}>\\)" },
{ "type": "text", "bbox": [ 0.11, 0.372, 0.457, 0.386 ], "angle": 0, "content": "...; \\(\\langle R_I\\rangle\\)." },
{ "type": "text", "bbox": [ 0.11, 0.387, 0.456, 0.399 ], "angle": 0, "content": "Please recommend the next item the user is likely to click."
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.401, + 0.439, + 0.414 + ], + "angle": 0, + "content": "Response: \\(\\langle \\mathrm{R_I}\\rangle < \\mathrm{M_{1\\_}10} > < \\mathrm{M_{2\\_}25} > < \\mathrm{R_{1\\_}52} > < \\mathrm{R_{2\\_}37}>\\)" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.44, + 0.483, + 0.496 + ], + "angle": 0, + "content": "Here, \\( \\langle \\mathrm{M}_{1}10\\rangle < \\mathrm{M}_{2}25\\rangle \\) represents the shared semantic and collaborative identifier of the item, \\( \\langle \\mathrm{S}_{1}184\\rangle < \\mathrm{S}_{2}110\\rangle \\) represents the semantic-specific identifier, and \\( \\langle \\mathrm{R}_{1}52\\rangle < \\mathrm{R}_{2}37\\rangle \\) represents the collaborative-specific identifier." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.516, + 0.483, + 0.627 + ], + "angle": 0, + "content": "3.3.2 Next Search Query Prediction. Some works focus on query recommendation [4, 12, 39], where they predict the next query a user is likely to search. Since our user interaction history also includes search queries, we introduce a task that allows the LLM to predict the user's next intended search query based on their history. This helps the model better understand user search intent and the relationship between S&R behaviors. A sample of the data for this task is as follows:" + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.644, + 0.293, + 0.658 + ], + "angle": 0, + "content": "Next Search Query Prediction" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.67, + 0.456, + 0.685 + ], + "angle": 0, + "content": "Instruction: Below is the user's interaction history: \\(\\langle \\mathbb{R}_{\\mathrm{I}}\\rangle\\)" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.685, + 0.457, + 0.698 + ], + "angle": 0, + "content": "\\(< \\mathrm{M}_{1 - }199>\\) \\(< \\mathrm{M}_{2 - }175>\\) \\(< \\mathrm{R}_{1 - }1>\\) \\(< \\mathrm{R}_{2 - }44>\\); \\(\\langle \\mathrm{R_I}\\rangle < \\mathrm{M}_{1 - }209>\\)" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.699, + 0.457, + 0.712 + ], + "angle": 0, + "content": "\\(\\langle \\mathsf{M}_2 - 235\\rangle\\) ;...;" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.713, + 0.456, + 0.74 + ], + "angle": 0, + "content": "\\(\\langle \\mathsf{M}_{2\\_68}\\rangle < \\mathsf{R}_{1\\_118}\\rangle < \\mathsf{R}_{2\\_85}\\rangle\\) . Please predict the next query the user might want to search." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.74, + 0.343, + 0.754 + ], + "angle": 0, + "content": "Response: \\(\\langle S_{\\mathrm{Q}}\\rangle\\) Artificial Intelligence" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.786, + 0.483, + 0.897 + ], + "angle": 0, + "content": "3.3.3 Next Search Item Prediction. To enable the model to perform well on the search task, we have it predict the next search item. Previous generative search models [35, 59] only input the user's query into the LLM to predict the target item, which considers only the correlation between the query and the item, without taking the user's preferences into account. To address this, we include the user's S&R history in the input to reflect their preferences. 
A sample of the data for this task is as follows:" + }, + { + "type": "title", + "bbox": [ + 0.542, + 0.108, + 0.715, + 0.121 + ], + "angle": 0, + "content": "Next Search Item Prediction" + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.132, + 0.889, + 0.147 + ], + "angle": 0, + "content": "Instruction: Below is the user's interaction history: \\(\\langle \\mathsf{R}_{\\mathsf{I}}\\rangle\\)" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.147, + 0.889, + 0.16 + ], + "angle": 0, + "content": "\\(< \\mathrm{M}_{1 - }199>\\) \\(< \\mathrm{M}_{2 - }175>\\) \\(< \\mathrm{R}_{1 - }1>\\) \\(< \\mathrm{R}_{2 - }44>\\); \\(\\langle \\mathrm{R_I}\\rangle < \\mathrm{M}_{1 - }209>\\)" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.161, + 0.889, + 0.174 + ], + "angle": 0, + "content": "\\(\\langle \\mathsf{M}_2 - 235\\rangle\\) ;...;" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.175, + 0.888, + 0.216 + ], + "angle": 0, + "content": "\\(\\langle \\mathsf{M}_{2 - 68}\\rangle < \\mathsf{R}_{1 - 118}\\rangle < \\mathsf{R}_{2 - 85}\\rangle\\) . The user's search query is \\(\\langle S_0\\rangle\\) Artificial Intelligence. Please predict the next item the user might click." + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.216, + 0.873, + 0.23 + ], + "angle": 0, + "content": "Response: \\(\\langle S_I\\rangle < M_{1\\_}23 > < M_{2\\_}42 > < S_{1\\_}126 > < S_{2\\_}73>\\)" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.246, + 0.913, + 0.275 + ], + "angle": 0, + "content": "Here, \\(\\langle S_Q\\rangle\\) Artificial Intelligence\" denotes the query that the user is currently searching for." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.281, + 0.915, + 0.364 + ], + "angle": 0, + "content": "3.3.4 Identifier-Language Alignment. To enhance the LLM's understanding of both the collaborative and semantic identifiers of each item, we designed an identifier-language alignment task. This task enables the LLM to generate a corresponding description based on an item's identifier and, conversely, to generate the appropriate identifier from the item's description." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.365, + 0.915, + 0.393 + ], + "angle": 0, + "content": "First, we have the Desc2ID task, which enables the LLM to generate the corresponding item identifier based on its description." + }, + { + "type": "title", + "bbox": [ + 0.542, + 0.403, + 0.596, + 0.415 + ], + "angle": 0, + "content": "Desc2ID" + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.427, + 0.888, + 0.455 + ], + "angle": 0, + "content": "Instruction: Using the provided description \"Apple MacBook Air\", predict the corresponding item." + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.456, + 0.846, + 0.469 + ], + "angle": 0, + "content": "Response: \\(<\\mathrm{M}_{1-135}> <\\mathrm{M}_{2-19}> <\\mathrm{S}_{1-41}> <\\mathrm{S}_{2-65}>\\)" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.486, + 0.915, + 0.515 + ], + "angle": 0, + "content": "Then, we have the ID2Desc task, which enables the LLM to generate the corresponding item description based on its identifier." + }, + { + "type": "title", + "bbox": [ + 0.542, + 0.525, + 0.596, + 0.537 + ], + "angle": 0, + "content": "ID2Desc" + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.548, + 0.888, + 0.576 + ], + "angle": 0, + "content": "Instruction: Please provide a description for the item \\( \\langle \\mathsf{M}_1 - 135\\rangle \\langle \\mathsf{M}_2 - 19\\rangle \\langle \\mathsf{S}_1 - 41\\rangle \\langle \\mathsf{S}_2 - 65\\rangle \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.541, + 0.577, + 0.738, + 0.59 + ], + "angle": 0, + "content": "Response: Apple MacBook Air." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.607, + 0.914, + 0.664 + ], + "angle": 0, + "content": "Please note that for both semantic and collaborative identifiers, we include the Desc2ID and ID2Desc training tasks. Since the input and output of these two tasks do not involve user history, we do not prepend a token indicating the behavior type to the identifier." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.674, + 0.748, + 0.69 + ], + "angle": 0, + "content": "3.4 Training and Inference" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.693, + 0.914, + 0.749 + ], + "angle": 0, + "content": "This section introduces how to train the LLM for joint S&R, and how to use the trained LLM during inference to generate the target item for either the search or recommendation task. The training and inference process of GenSAR is shown in Figure 3." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.755, + 0.915, + 0.824 + ], + "angle": 0, + "content": "3.4.1 Training. As previously mentioned, each interaction in the user's history is represented as an identifier, allowing us to formulate the task as a sequence-to-sequence problem. We train the model using next token prediction, optimizing the negative log-likelihood of generating the target as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.625, + 0.829, + 0.913, + 0.866 + ], + "angle": 0, + "content": "\[\n\mathcal{L} = - \sum_{t=1}^{T} \log P \left(y_{t} \mid y_{<t}, \text{Ins}\right). \tag{12}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.868, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Here, \(y\) represents the behavior-aware identifier of the target to be predicted, as defined in Eq. (11). \(T\) is the length of the identifier of" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.368, + 0.088 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.843, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Teng Shi et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.083, + 0.105, + 0.486, + 0.148 + ], + "angle": 0, + "content": "Table 1: Comparison of different generative search or recommendation methods. \"S.\" and \"R.\" denote search and recommendation respectively." + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.153, + 0.476, + 0.268 + ], + "angle": 0, + "content": "
Methods | Scale | Backbone | Task | Identifier
 | | | S. | R. | Semantic | Collaborative
P5 [11, 17] | 60M/220M | T5-small/T5-base | ✗ | ✓ | ✗ | ✓
TIGER [26] | 60M | T5-small | ✗ | ✓ | ✓ | ✗
LC-Rec [56] | 7B | LLaMA | ✗ | ✓ | ✓ | ✗
DSI-QG [59] | 220M | T5-base | ✓ | ✗ | ✓ | ✗
WebUltron [58] | 220M | T5-base | ✓ | ✗ | ✓ | ✗
GenRet [33] | 220M | T5-base | ✓ | ✗ | ✓ | ✗
GenSAR (Ours) | 60M | T5-small | ✓ | ✓ | ✓ | ✓
" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.277, + 0.483, + 0.307 + ], + "angle": 0, + "content": "the target item. Ins refers to the various instructions described in Section 3.3, which are used as inputs for the LLM." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.313, + 0.483, + 0.465 + ], + "angle": 0, + "content": "3.4.2 Inference. During training, we train the LLM according to the input-output format described in Section 3.3. During inference, to apply the LLM to search and recommendation tasks, we append a behavior token, either “\\(\\langle S_I \\rangle\\)” for search or “\\(\\langle R_I \\rangle\\)” for recommendation, to the input of the LLM to prompt it to generate the corresponding next item for search or recommendation, respectively. The other tasks mentioned in Section 3.3 are used as auxiliary tasks during training to help the model better understand user S&R behaviors. During generation, to ensure that the items generated by the LLM are within the candidate set, we follow previous works [17, 56] and use constrained beam search." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.477, + 0.218, + 0.491 + ], + "angle": 0, + "content": "3.5 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.495, + 0.483, + 0.592 + ], + "angle": 0, + "content": "As shown in Table 1, we compare GenSAR with various generative search or recommendation methods in terms of scale (number of parameters), backbone architecture used, and applicable tasks. GenSAR adopts T5-small as its backbone, resulting in a relatively small number of parameters while being capable of serving both S&R tasks. Compared with existing methods, it achieves an optimal balance between efficiency and effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.593, + 0.483, + 0.662 + ], + "angle": 0, + "content": "In terms of novelty, unlike existing methods that focus solely on either semantic or collaborative information in identifier design, our approach incorporates both the semantic information required for search and the collaborative signals essential for recommendation. This joint consideration helps alleviate the trade-off between S&R." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.674, + 0.22, + 0.689 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.692, + 0.483, + 0.707 + ], + "angle": 0, + "content": "We conducted experiments to evaluate the performance of GenSAR." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.718, + 0.291, + 0.735 + ], + "angle": 0, + "content": "4.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.737, + 0.493, + 0.835 + ], + "angle": 0, + "content": "4.1.1 Dataset. We conducted experiments on the following datasets: (1) Amazon\\(^{1}\\) [13, 24]: Following previous works [2, 3, 29, 31], we use the semi-synthetic dataset based on Amazon recommendation data as the public dataset for our experiments.\\(^{2}\\) (2) Commercial: To thoroughly evaluate the effectiveness of GenSAR, we collected a dataset from a Chinese commercial app, containing S&R interactions from 10,000 users over two weeks. 
For details on data" + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.842, + 0.483, + 0.865 + ], + "angle": 0, + "content": "1https://cseweb.ucsd.edu/~jmcauley/datasets/amazon/links.html, https://github.com/QingyaoAi/Amazon-Product-Search-Datasets" + }, + { + "type": "page_footnote", + "bbox": [ + 0.083, + 0.865, + 0.482, + 0.896 + ], + "angle": 0, + "content": "2Please note that \\(70\\%\\) of the items in the \"Kindle Store\" subset used in previous works [29, 31] lack textual information, so we use the \"Electronics\" subset, where less than \\(1\\%\\) of the items lack text." + }, + { + "type": "list", + "bbox": [ + 0.082, + 0.842, + 0.483, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.105, + 0.913, + 0.133 + ], + "angle": 0, + "content": "Table 2: Statistics of the datasets used in this paper. \"S\" and \"R\" denote search and recommendation, respectively." + }, + { + "type": "table", + "bbox": [ + 0.521, + 0.138, + 0.908, + 0.186 + ], + "angle": 0, + "content": "
Dataset | #Users | #Items | #Queries | #Interaction-R | #Interaction-S
Amazon | 192,403 | 62,883 | 983 | 1,266,903 | 1,081,934
Commercial | 10,000 | 782,225 | 135,206 | 4,286,866 | 383,465
" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.214, + 0.913, + 0.241 + ], + "angle": 0, + "content": "processing and train/validation/test splitting, please see the code link." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.254, + 0.913, + 0.281 + ], + "angle": 0, + "content": "4.1.2 Baselines. In this work, we use the following representative methods as baselines for comparison with GenSAR." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.282, + 0.916, + 0.421 + ], + "angle": 0, + "content": "First, we compare with the following recommendation models: (1) Sequential Recommendation: GRU4Rec [16]; SASRec [19]; FMLP-Rec [57]; LRURec [45]. (2) Generative Recommendation: P5-CID [11, 17]; TIGER [26]; LC-Rec [56]. Next, we compare with the following search models: (1) Personalized Search: QEM [2]; TEM [6]; CoPPS [7]. (2) Dense Retrieval: E5³ [36]; BGE⁴ [40]. (3) Generative Retrieval: DSI-QG [59]; WebUltron [58]; GenRet [33]. Finally, we compare with the following joint S&R models: JSR [46]; SESRec [31]; UnifiedSSR [41]; UniSAR [29]. For more details on the baselines, please see the code link." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.433, + 0.916, + 0.528 + ], + "angle": 0, + "content": "4.1.3 Evaluation Metrics & Implementation Details. Following previous works [29, 31, 57], we use ranking metrics including top-\\(k\\) Hit Ratio (HR) and top-\\(k\\) Normalized Discounted Cumulative Gain (NDCG). We report the results for \\(k\\) values of \\(\\{1, 5, 10\\}\\), and since NDCG@1 is the same as HR@1, we do not report it. For more details on the evaluation and model implementation, please see the code link." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.546, + 0.731, + 0.56 + ], + "angle": 0, + "content": "4.2 Overall Performance" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.565, + 0.916, + 0.592 + ], + "angle": 0, + "content": "Table 3 and Table 4 show the S&R results on two datasets, respectively. From the results, we can observe that:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.593, + 0.916, + 0.661 + ], + "angle": 0, + "content": "- Firstly, it can be seen that compared to existing search or recommendation models, GenSAR achieves state-of-the-art results. This validates the effectiveness of GenSAR in alleviating the trade-off between S&R through generative retrieval, by designing joint identifiers and training tasks for both tasks." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.661, + 0.916, + 0.744 + ], + "angle": 0, + "content": "- Secondly, we can observe that most joint S&R methods (e.g., JSR, UniSAR, GenSAR) outperform traditional methods that using only item IDs, such as sequential recommendation (e.g., SASRec, FMLP-Rec) and personalized search methods (e.g., QEM, TEM, CoPPS). This demonstrates the advantages of jointly modeling of S&R, as it enhances the performance of both tasks." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.745, + 0.916, + 0.828 + ], + "angle": 0, + "content": "- Thirdly, it can be observed that for search, dense retrieval (e.g., E5, BGE) and generative retrieval (e.g., GenRet, GenSAR) methods that rely on semantic information outperform personalized search models (e.g., QEM, TEM, CoPPS) that rely solely on ID information. This also confirms that for search, semantic information is more important than collaborative information." 
+ }, + { + "type": "list", + "bbox": [ + 0.514, + 0.593, + 0.916, + 0.828 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.514, + 0.862, + 0.915, + 0.896 + ], + "angle": 0, + "content": "\\(^{3}\\)https://huggingface.co/intfloat/multilingual-e5-base \n\\(^{4}\\)https://huggingface.co/BAAI/bge-base-en-v1.5, https://huggingface.co/BAAI/bge-base-zh-v1.5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.319, + 0.087 + ], + "angle": 0, + "content": "Unified Generative Search and Recommendation" + }, + { + "type": "header", + "bbox": [ + 0.631, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.104, + 0.916, + 0.161 + ], + "angle": 0, + "content": "Table 3: The recommendation performance of different methods on the two datasets. The best and the second-best methods are highlighted in bold and underlined fonts, respectively. The improvements over the second-best methods are statistically significant (t-test, \\( p \\)-value \\( < 0.05 \\)). Following commonly used settings [29, 31, 57], we pair the ground-truth item with 99 randomly sampled items that the user has not interacted with to form the candidate list." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.17, + 0.903, + 0.328 + ], + "angle": 0, + "content": "
Datasets | Metrics | Recommendation | Joint Search and Recommendation
 | | GRU4Rec | SASRec | FMLP-Rec | LRURec | P5-CID | TIGER | LC-Rec | JSR | SESRec | UnifiedSSR | UniSAR | GenSAR
Amazon | HR@1 | 0.0440 | 0.0544 | 0.0534 | 0.0544 | 0.0881 | 0.1073 | 0.1063 | 0.0657 | 0.0627 | 0.0477 | 0.0680 | 0.1261
 | HR@5 | 0.1716 | 0.1887 | 0.1898 | 0.1890 | 0.1874 | 0.2046 | 0.1973 | 0.2075 | 0.2083 | 0.1667 | 0.2171 | 0.2228
 | HR@10 | 0.2884 | 0.2992 | 0.3041 | 0.3001 | 0.2790 | 0.2852 | 0.2760 | 0.3188 | 0.3209 | 0.2707 | 0.3319 | 0.3063
 | NDCG@5 | 0.1074 | 0.1216 | 0.1217 | 0.1218 | 0.1380 | 0.1565 | 0.1522 | 0.1371 | 0.1359 | 0.1071 | 0.1432 | 0.1748
 | NDCG@10 | 0.1449 | 0.1571 | 0.1584 | 0.1575 | 0.1674 | 0.1824 | 0.1774 | 0.1729 | 0.1721 | 0.1405 | 0.1802 | 0.2015
Commercial | HR@1 | 0.1022 | 0.1519 | 0.1442 | 0.1363 | 0.2843 | 0.2630 | 0.2703 | 0.1576 | 0.1890 | 0.1515 | 0.2214 | 0.2997
 | HR@5 | 0.2526 | 0.2812 | 0.2711 | 0.2637 | 0.3305 | 0.3013 | 0.3001 | 0.2685 | 0.2845 | 0.2844 | 0.3228 | 0.3496
 | HR@10 | 0.3527 | 0.3716 | 0.3584 | 0.3525 | 0.3830 | 0.3448 | 0.3333 | 0.3529 | 0.3690 | 0.3870 | 0.4056 | 0.4031
 | NDCG@5 | 0.1787 | 0.2179 | 0.2093 | 0.2021 | 0.3072 | 0.2819 | 0.2849 | 0.2142 | 0.2370 | 0.2195 | 0.2727 | 0.3241
 | NDCG@10 | 0.2110 | 0.2470 | 0.2373 | 0.2306 | 0.3240 | 0.2958 | 0.2955 | 0.2413 | 0.2641 | 0.2524 | 0.2993 | 0.3411
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.33, + 0.916, + 0.388 + ], + "angle": 0, + "content": "Table 4: The search performance of different methods on the two datasets. Since search relies on semantic relevance, previous works [29, 41] that randomly sample negatives often produce overly easy examples, leading to inflated performance and poor model differentiation. To address this, we follow prior personalized search methods [1, 9] and use BM25 [27] to retrieve 99 harder negatives, forming a candidate list with the positive sample for more accurate evaluation." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.396, + 0.903, + 0.56 + ], + "angle": 0, + "content": "
Datasets | Metrics | Search | Joint Search and Recommendation
 | | QEM | TEM | CoPPS | E5 | BGE | DSI-QG | WebUltron | GenRet | JSR | UnifiedSSR | UniSAR | GenSAR
Amazon | HR@1 | 0.1512 | 0.0839 | 0.0943 | 0.3289 | 0.4030 | 0.3558 | 0.3432 | 0.4173 | 0.0835 | 0.0799 | 0.1122 | 0.5262
 | HR@5 | 0.3101 | 0.3471 | 0.3380 | 0.5945 | 0.6264 | 0.5848 | 0.5464 | 0.6513 | 0.2407 | 0.2476 | 0.3129 | 0.7529
 | HR@10 | 0.4657 | 0.5181 | 0.4909 | 0.7203 | 0.7475 | 0.6897 | 0.6216 | 0.7339 | 0.3463 | 0.3614 | 0.4333 | 0.8217
 | NDCG@5 | 0.2311 | 0.2173 | 0.2154 | 0.4662 | 0.5219 | 0.4764 | 0.4507 | 0.5399 | 0.1623 | 0.1662 | 0.2143 | 0.6485
 | NDCG@10 | 0.2809 | 0.2722 | 0.2647 | 0.5069 | 0.5613 | 0.5103 | 0.4748 | 0.5667 | 0.1962 | 0.2028 | 0.2533 | 0.6710
Commercial | HR@1 | 0.0311 | 0.0328 | 0.0265 | 0.1277 | 0.1267 | 0.1016 | 0.0804 | 0.1171 | 0.0273 | 0.0119 | 0.0511 | 0.1249
 | HR@5 | 0.0870 | 0.1106 | 0.0998 | 0.3108 | 0.3184 | 0.2831 | 0.2619 | 0.3320 | 0.1202 | 0.0470 | 0.1810 | 0.3655
 | HR@10 | 0.1539 | 0.1925 | 0.1792 | 0.4044 | 0.4194 | 0.4132 | 0.3992 | 0.4666 | 0.2137 | 0.0873 | 0.3231 | 0.5250
 | NDCG@5 | 0.0586 | 0.0715 | 0.0626 | 0.2230 | 0.2258 | 0.1940 | 0.1721 | 0.2273 | 0.0728 | 0.0292 | 0.1144 | 0.2472
 | NDCG@10 | 0.0799 | 0.0977 | 0.0880 | 0.2533 | 0.2584 | 0.2359 | 0.2164 | 0.2708 | 0.1026 | 0.0420 | 0.1597 | 0.2987
" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.568, + 0.251, + 0.584 + ], + "angle": 0, + "content": "4.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.586, + 0.483, + 0.627 + ], + "angle": 0, + "content": "We conducted ablation study on the Commercial dataset to validate the effectiveness of the various training tasks in GenSAR, as shown in Table 5." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.628, + 0.483, + 0.737 + ], + "angle": 0, + "content": "Impact of Behavior Token. As shown in Section 3.2.4, we pretended a token indicating the type of behavior to be an identifier of each user interaction, enabling the LLM to recognize different behavior types. To evaluate its impact, we removed this behavior token, as shown in Table 5 (\"w/o Behavior Token\"). The results indicate that removing the behavior token degrades performance, validating that adding this token helps the LLM better understand the relationship between user S&R behaviors." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.739, + 0.483, + 0.89 + ], + "angle": 0, + "content": "Next Recommendation Item Prediction (NRIP). As shown in Section 3.3.1, we incorporated the training task \"Next Recommendation Item Prediction\" (NRIP), which enables the LLM to predict the next item to recommend based on user history. To evaluate its impact, we removed this task, as shown in Table 5 (\"w/o NRIP\"). The results demonstrate that removing this task significantly degrades recommendation performance and slightly reduces search performance, highlighting the importance of NRIP. Additionally, this demonstrates that recommendation training tasks can enhance search performance, verifying that recommendation can benefit search." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.569, + 0.915, + 0.693 + ], + "angle": 0, + "content": "Next Search Query Prediction (NSQP). We included the training task \"Next Search Query Prediction\" (NSQP) to enable the LLM to better understand user intent by predicting the next query a user might want to search, as described in Section 3.3.2. To evaluate its impact, we observed the results after removing this task, as shown in Table 5 (\"w/o NSQP\"). The results indicate that removing this task significantly degrades search performance and also affects recommendation performance, demonstrating that NSQP helps the model better understand user search intent." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.694, + 0.915, + 0.831 + ], + "angle": 0, + "content": "Next Search Item Prediction (NSIP). In Section 3.3.3, we introduced the training task \"Next Search Item Prediction\" (NSIP), which allows the LLM to predict the next item a user might click based on their history and input query. We analyzed the impact of this task, as shown in Table 5 (\"w/o NSIP\"). The results indicate that removing this task significantly degrades search performance, while also slightly affecting recommendation performance. This demonstrates the importance of NSIP for search and further highlights that search training tasks can enhance recommendation performance, validating that search can assist recommendation." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.832, + 0.915, + 0.887 + ], + "angle": 0, + "content": "Identifier-Language Alignment. In Section 3.3.4, we introduced two tasks, Desc2ID and ID2Desc, for identifier-language alignment, which help the LLM better understand the semantic and collaborative identifiers of each item. 
We observed the impact of" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.366, + 0.087 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.843, + 0.076, + 0.913, + 0.087 + ], + "angle": 0, + "content": "Teng Shi et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.105, + 0.482, + 0.145 + ], + "angle": 0, + "content": "Table 5: Ablation study on the Commercial dataset, where \"w/o\" denotes the removal of the corresponding module in GenSAR." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.15, + 0.47, + 0.313 + ], + "angle": 0, + "content": "
Model | Recommendation | Search
 | HR@5 | NDCG@5 | HR@5 | NDCG@5
GenSAR | 0.3496 | 0.3241 | 0.3655 | 0.2472
w/o Behavior Token | 0.3430 | 0.3193 | 0.3298 | 0.2224
w/o NRIP | 0.0665 | 0.0392 | 0.3456 | 0.2342
w/o NSQP | 0.3401 | 0.3163 | 0.3089 | 0.2053
w/o NSIP | 0.3390 | 0.3152 | 0.1668 | 0.1113
w/o Desc2ID | 0.3416 | 0.3188 | 0.3355 | 0.2278
w/o ID2Desc | 0.3458 | 0.3220 | 0.3398 | 0.2308
" + }, + { + "type": "image", + "bbox": [ + 0.09, + 0.314, + 0.277, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.103, + 0.409, + 0.266, + 0.419 + ], + "angle": 0, + "content": "(a) Recommendation Performance" + }, + { + "type": "image", + "bbox": [ + 0.287, + 0.315, + 0.475, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.326, + 0.409, + 0.439, + 0.419 + ], + "angle": 0, + "content": "(b) Search Performance" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.431, + 0.481, + 0.445 + ], + "angle": 0, + "content": "Figure 4: Performance of GenSAR using different identifiers." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.452, + 0.47, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.567, + 0.437, + 0.58 + ], + "angle": 0, + "content": "Figure 5: Collision rate of different identifiers." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.595, + 0.483, + 0.651 + ], + "angle": 0, + "content": "removing these two tasks, as shown in Table 5 (w/o \"Desc2ID\" and w/o \"ID2Desc\"). It can be seen that removing these tasks leads to a decrease in both S&R performance, indicating the effectiveness of these tasks in helping the LLM better understand item identifiers." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.663, + 0.314, + 0.679 + ], + "angle": 0, + "content": "4.4 Experimental Analysis" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.681, + 0.481, + 0.709 + ], + "angle": 0, + "content": "We conducted further experiments on the Commercial dataset to analyze the effectiveness of different modules in GenSAR." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.716, + 0.483, + 0.895 + ], + "angle": 0, + "content": "4.4.1 Impact of Different Identifier. To balance the semantic information needed for search and the collaborative information needed for recommendation, we designed the joint S&R identifier in Section 3.2. To validate its effectiveness, we compared it with identifiers learned directly from semantic embeddings or collaborative embeddings using RQ-VAE [26, 56], as shown in Figure 4. \"Only Collaborative\" represents using only collaborative embeddings, while \"Only Semantic\" represents using only semantic embeddings. The results show that identifiers derived solely from semantic or collaborative information lead to degraded performance. Furthermore, using only collaborative information results in worse search performance, which aligns with the fact that search relies more on semantic information." + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.11, + 0.709, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.535, + 0.203, + 0.698, + 0.214 + ], + "angle": 0, + "content": "(a) Recommendation Performance" + }, + { + "type": "image", + "bbox": [ + 0.72, + 0.11, + 0.907, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.757, + 0.203, + 0.871, + 0.214 + ], + "angle": 0, + "content": "(b) Search Performance" + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.225, + 0.913, + 0.266 + ], + "angle": 0, + "content": "Figure 6: Performance under different numbers of shared codebooks \\( L_{m} \\). We fix \\( L_{m} + L_{n} = 4 \\) and vary \\( L_{m} \\) to observe the results." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.29, + 0.915, + 0.34 + ], + "angle": 0, + "content": "4.4.2 Collision Rate of Different Identifier. Additionally, we analyzed the advantages of different identifiers from the perspective of collision rate. The formula for calculating the collision rate is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.602, + 0.342, + 0.825, + 0.369 + ], + "angle": 0, + "content": "\[\n\text{Collision Rate} = 1 - \frac{\#\text{Unique Identifier}}{\#\text{Unique Item}},\n\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.374, + 0.915, + 0.569 + ], + "angle": 0, + "content": "where # Unique Identifier represents the number of unique identifiers, and # Unique Item represents the number of unique items. Since RQ-VAE does not guarantee a unique identifier for each item during the learning process, collisions may occur where different items share the same identifier [26, 56]. A higher collision rate can negatively impact the model's performance. From Figure 5, it can be observed that the two identifiers assigned to each item in GenSAR, incorporating both semantic and collaborative information, have a lower collision rate of \(0.18\%\) and \(0.39\%\), respectively. In contrast, identifiers derived solely from semantic embeddings or collaborative embeddings exhibit higher collision rates of \(1.37\%\) and \(0.90\%\), respectively. This further validates the advantage of the identifiers in GenSAR, as their lower collision rate enables the model to achieve better performance." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.578, + 0.913, + 0.73 + ], + "angle": 0, + "content": "4.4.3 Impact of Hyper-parameters. As described in Section 3.2, we have \( L_{m} \)-level shared codebooks and \( L_{n} \)-level specific codebooks. Here, we analyze the impact of the number of shared and specific codebooks (\( L_{m} \) and \( L_{n} \)) on the results, as shown in Figure 6. We fix \( L_{m} + L_{n} = 4 \) and observe the results. It can be seen that having too few (\( L_{m} = 1 \)) or too many (\( L_{m} = 3 \)) shared codebooks fails to achieve strong performance in both S&R. This indicates that \( L_{m} \) needs to be properly set so that the identifier can capture both the shared information between semantics and collaboration as well as their specific characteristics. Only in this way can we achieve better performance in both S&R." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.73, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Additionally, we analyzed the impact of identifier length on performance, as shown in Figure 7. We fix \( L_{m} = 2 \) and vary \( L_{n} \) to adjust the identifier length and observe the results. It can be seen that both shorter \( (L_{m} + L_{n} = 3) \) and longer \( (L_{m} + L_{n} = 5) \) identifiers lead to performance degradation. This is because, when the identifier is too short, the identifiers learned through RQ-VAE are more prone to collisions, resulting in a higher collision rate and making it difficult for the model to distinguish between different items. On the other hand, when the identifier is too long, the model requires more decoding steps during item generation, leading to accumulated errors and ultimately deteriorating performance. Therefore, it is essential to properly set the identifier length to achieve better performance."
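The collision rate of Section 4.4.2 can be computed directly; in the sketch below the 4-level code tuples are hypothetical.

```python
# Share of items that fail to receive their own identifier (Section 4.4.2).

def collision_rate(identifiers):
    return 1 - len(set(identifiers)) / len(identifiers)

ids = [(1, 5, 3, 9), (2, 5, 3, 9), (1, 5, 3, 9), (7, 0, 2, 4)]  # one duplicate
print(f"{collision_rate(ids):.2%}")  # 25.00%
```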
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.319, + 0.087 + ], + "angle": 0, + "content": "Unified Generative Search and Recommendation" + }, + { + "type": "header", + "bbox": [ + 0.631, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "image", + "bbox": [ + 0.09, + 0.109, + 0.278, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.103, + 0.203, + 0.267, + 0.214 + ], + "angle": 0, + "content": "(a) Recommendation Performance" + }, + { + "type": "image", + "bbox": [ + 0.287, + 0.109, + 0.476, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.325, + 0.203, + 0.439, + 0.214 + ], + "angle": 0, + "content": "(b) Search Performance" + }, + { + "type": "image_caption", + "bbox": [ + 0.083, + 0.225, + 0.485, + 0.253 + ], + "angle": 0, + "content": "Figure 7: Performance under different length of the identifier. We fix \\( L_{m} = 2 \\) and vary \\( L_{n} \\) to adjust the identifier length." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.273, + 0.208, + 0.286 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.291, + 0.484, + 0.471 + ], + "angle": 0, + "content": "In this paper, we propose GenSAR, which unifies balanced search and recommendation through generative retrieval to alleviate the trade-off between the two tasks and improve their performance. To balance the semantic information required for search and the collaborative information needed for recommendation, we design the joint S&R identifier and different training tasks. First, we learn two identifiers for each item to represent semantic and collaborative information, respectively. These identifiers share a common part to capture the information shared between semantics and collaboration while retaining distinct parts to preserve specific information. Second, we design different training tasks to help the model better understand the requirements of S&R tasks. We also validate the effectiveness of GenSAR through extensive experiments." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.485, + 0.178, + 0.499 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.502, + 0.483, + 0.533 + ], + "angle": 0, + "content": "[1] Wasi Uddin Ahmad, Kai-Wei Chang, and Hongning Wang. 2018. Multi-task learning for document ranking and query suggestion. In International conference on learning representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.533, + 0.483, + 0.563 + ], + "angle": 0, + "content": "[2] Qingyao Ai, Daniel N Hill, SVN Vishwanathan, and W Bruce Croft. 2019. A zero attention model for personalized product search. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management. 379-388." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.563, + 0.482, + 0.603 + ], + "angle": 0, + "content": "[3] Qingyao Ai, Yongfeng Zhang, Keping Bi, Xu Chen, and W Bruce Croft. 2017. Learning a hierarchical embedding model for personalized product search. In Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval. 645-654." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.603, + 0.482, + 0.643 + ], + "angle": 0, + "content": "[4] Jinheon Baek, Nirupama Chandrasekaran, Silviu Cucerzan, Allen Herring, and Sujay Kumar Jauhar. 2024. 
Knowledge-augmented large language models for personalized contextual query suggestion. In Proceedings of the ACM on Web Conference 2024. 3355-3366." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.643, + 0.482, + 0.683 + ], + "angle": 0, + "content": "[5] Michele Bevilacqua, Giuseppe Ottaviano, Patrick Lewis, Scott Yih, Sebastian Riedel, and Fabio Petroni. 2022. Autoregressive search engines: Generating substrings as document identifiers. Advances in Neural Information Processing Systems 35 (2022), 31668-31683." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.683, + 0.482, + 0.723 + ], + "angle": 0, + "content": "[6] Keping Bi, Qingyao Ai, and W Bruce Croft. 2020. A transformer-based embedding model for personalized product search. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 1521-1524." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.723, + 0.482, + 0.773 + ], + "angle": 0, + "content": "[7] Shitong Dai, Jiongnan Liu, Zhicheng Dou, Haonan Wang, Lin Liu, Bo Long, and Ji-Rong Wen. 2023. Contrastive Learning for User Sequence Representation in Personalized Product Search. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, KDD 2023, Long Beach, CA, USA, August 6-10, 2023. ACM, 380-389." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.773, + 0.482, + 0.814 + ], + "angle": 0, + "content": "[8] Sunhao Dai, Ninglu Shao, Haiyuan Zhao, Weijie Yu, Zihua Si, Chen Xu, Zhongxiang Sun, Xiao Zhang, and Jun Xu. 2023. Uncovering chatgpt's capabilities in recommender systems. In Proceedings of the 17th ACM Conference on Recommender Systems. 1126-1132." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.814, + 0.482, + 0.845 + ], + "angle": 0, + "content": "[9] Chenlong Deng, Yujia Zhou, and Zhicheng Dou. 2022. Improving personalized search with dual-feedback network. In Proceedings of the fifteenth ACM international conference on web search and data mining, 210-218." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.845, + 0.482, + 0.896 + ], + "angle": 0, + "content": "[10] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)." + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.502, + 0.483, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.109, + 0.914, + 0.15 + ], + "angle": 0, + "content": "[11] Shijie Geng, Shuchang Liu, Zuohui Fu, Yingqiang Ge, and Yongfeng Zhang. 2022. Recommendation as language processing (rlp): A unified pretrain, personalized prompt & predict paradigm (p5). In Proceedings of the 16th ACM Conference on Recommender Systems. 299-315." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.15, + 0.915, + 0.2 + ], + "angle": 0, + "content": "[12] Yulong Gu, Wentian Bao, Dan Ou, Xiang Li, Baoliang Cui, Biyu Ma, Haikuan Huang, Qingwen Liu, and Xiaoyi Zeng. 2021. Self-supervised learning on users' spontaneous behaviors for multi-scenario ranking in e-commerce. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 3828-3837." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.2, + 0.914, + 0.231 + ], + "angle": 0, + "content": "[13] Ruining He and Julian McAuley. 
2016. Ups and downs: Modeling the visual evolution of fashion trends with one-class collaborative filtering. In proceedings of the 25th international conference on world wide web. 507-517." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.231, + 0.914, + 0.261 + ], + "angle": 0, + "content": "[14] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173-182." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.261, + 0.914, + 0.311 + ], + "angle": 0, + "content": "[15] Zhankui He, Handong Zhao, Zhaowen Wang, Zhe Lin, Ajinkya Kale, and Julian Mcauley. 2022. Query-Aware Sequential Recommendation. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management (Atlanta, GA, USA) (CIKM '22). Association for Computing Machinery, New York, NY, USA, 4019-4023." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.311, + 0.914, + 0.361 + ], + "angle": 0, + "content": "[16] Balázs Hidasi, Alexandros Karatzoglou, Linas Baltrunas, and Domonkos Tikk. 2016. Session-based Recommendations with Recurrent Neural Networks. In 4th International Conference on Learning Representations, ICLR 2016, San Juan, Puerto Rico, May 2-4, 2016, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.361, + 0.914, + 0.402 + ], + "angle": 0, + "content": "[17] Wenyue Hua, Shuyuan Xu, Yingqiang Ge, and Yongfeng Zhang. 2023. How to index item ids for recommendation foundation models. In Proceedings of the Annual International ACM SIGIR Conference on Research and Development in Information Retrieval in the Asia Pacific Region. 195-204." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.402, + 0.914, + 0.442 + ], + "angle": 0, + "content": "[18] Gautier Izacard, Mathilde Caron, Lucas Hosseini, Sebastian Riedel, Piotr Bojanowski, Armand Joulin, and Edouard Grave. 2021. Unsupervised dense information retrieval with contrastive learning. arXiv preprint arXiv:2112.09118 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.442, + 0.914, + 0.472 + ], + "angle": 0, + "content": "[19] Wang-Cheng Kang and Julian McAuley. 2018. Self-attentive sequential recommendation. In 2018 IEEE International Conference on Data Mining (ICDM). IEEE, 197-206." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.472, + 0.914, + 0.503 + ], + "angle": 0, + "content": "[20] Doyup Lee, Chihuon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. 2022. Autoregressive image generation using residual quantization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 11523-11532." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.503, + 0.914, + 0.533 + ], + "angle": 0, + "content": "[21] Xiaoxi Li, Jiajie Jin, Yujiia Zhou, Yuyao Zhang, Peitian Zhang, Yutao Zhu, and Zhicheng Dou. 2024. From matching to generation: A survey on generative information retrieval. arXiv preprint arXiv:2404.14851 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.533, + 0.914, + 0.572 + ], + "angle": 0, + "content": "[22] Yongqi Li, Nan Yang, Liang Wang, Furu Wei, and Wenjie Li. 2023. Multiview Identifiers Enhanced Generative Retrieval. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 6636-6648." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.572, + 0.914, + 0.613 + ], + "angle": 0, + "content": "[23] Jiayi Liao, Sihang Li, Zhengyi Yang, Jiancan Wu, Yancheng Yuan, Xiang Wang, and Xiangnan He. 2024. Llara: Large language-recommendation assistant. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1785-1795." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.613, + 0.914, + 0.654 + ], + "angle": 0, + "content": "[24] Julian McAuley, Christopher Targett, Qinfeng Shi, and Anton Van Den Hengel. 2015. Image-based recommendations on styles and substitutes. In Proceedings of the 38th international ACM SIGIR conference on research and development in information retrieval. 43-52." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.654, + 0.914, + 0.693 + ], + "angle": 0, + "content": "[25] Gustavo Penha, Ali Vardasbi, Enrico Palumbo, Marco De Nadai, and Hugues Bouchard. 2024. Bridging Search and Recommendation in Generative Retrieval: Does One Task Help the Other?. In Proceedings of the 18th ACM Conference on Recommender Systems. 340-349." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.693, + 0.914, + 0.734 + ], + "angle": 0, + "content": "[26] Shashank Rajput, Nikhil Mehta, Anima Singh, Raghunandan Hulikal Keshavan, Trung Vu, Lukasz Heldt, Lichan Hong, Yi Tay, Vinh Tran, Jonah Samost, et al. 2023. Recommender systems with generative retrieval. Advances in Neural Information Processing Systems 36 (2023), 10299-10315." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.734, + 0.914, + 0.764 + ], + "angle": 0, + "content": "[27] Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: BM25 and beyond. Foundations and Trends® in Information Retrieval 3, 4 (2009), 333-389." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.764, + 0.914, + 0.794 + ], + "angle": 0, + "content": "[28] Chenglei Shen, Xiao Zhang, Teng Shi, Changshuo Zhang, Guofu Xie, and Jun Xu. 2024. A survey of controllable learning: Methods and applications in information retrieval. arXiv preprint arXiv:2407.06083 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.794, + 0.914, + 0.844 + ], + "angle": 0, + "content": "[29] Teng Shi, Zihua Si, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Dewei Leng, Yanan Niu, and Yang Song. 2024. UniSAR: Modeling User Transition Behaviors between Search and Recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1029-1039." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.844, + 0.914, + 0.895 + ], + "angle": 0, + "content": "[30] Zihua Si, Xueran Han, Xiao Zhang, Jun Xu, Yue Yin, Yang Song, and Ji-Rong Wen. 2022. A Model-Agnostic Causal Learning Framework for Recommendation Using Search Data. In Proceedings of the ACM Web Conference 2022 (Virtual Event, Lyon, France) (WWW '22). Association for Computing Machinery, New York, NY, USA, 224-233." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.109, + 0.915, + 0.895 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.367, + 0.087 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.843, + 0.076, + 0.913, + 0.087 + ], + "angle": 0, + "content": "Teng Shi et al." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.109, + 0.482, + 0.16 + ], + "angle": 0, + "content": "[31] Zihua Si, Zhongxiang Sun, Xiao Zhang, Jun Xu, Xiaoxue Zang, Yang Song, Kun Gai, and Ji-Rong Wen. 2023. When search meets recommendation: Learning disentangled search representation for recommendation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1313-1323." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.16, + 0.482, + 0.211 + ], + "angle": 0, + "content": "[32] Fei Sun, Jun Liu, Jian Wu, Changhua Pei, Xiao Lin, Wenwu Ou, and Peng Jiang. 2019. BERT4Rec: Sequential Recommendation with Bidirectional Encoder Representations from Transformer. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management (Beijing, China) (CIKM '19). ACM, New York, NY, USA, 1441-1450." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.211, + 0.482, + 0.251 + ], + "angle": 0, + "content": "[33] Weiwei Sun, Lingyong Yan, Zheng Chen, Shuaiqiang Wang, Haichao Zhu, Pengjie Ren, Zhumin Chen, Dawei Yin, Maarten Rijke, and Zhaochun Ren. 2024. Learning to tokenize for generative retrieval. Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.251, + 0.482, + 0.292 + ], + "angle": 0, + "content": "[34] Jiakai Tang, Sunhao Dai, Teng Shi, Jun Xu, Xu Chen, Wen Chen, Wu Jian, and Yuning Jiang. 2025. Think Before Recommend: Unleashing the Latent Reasoning Power for Sequential Recommendation. arXiv:2503.22675 [cs.IR] https://arxiv.org/abs/2503.22675" + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.292, + 0.482, + 0.332 + ], + "angle": 0, + "content": "[35] Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. 2022. Transformer memory as a differentiable search index. Advances in Neural Information Processing Systems 35 (2022), 21831-21843." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.332, + 0.482, + 0.362 + ], + "angle": 0, + "content": "[36] Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, and Furu Wei. 2024. Multilingual e5 text embeddings: A technical report. arXiv preprint arXiv:2402.05672 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.362, + 0.482, + 0.412 + ], + "angle": 0, + "content": "[37] Yuening Wang, Man Chen, Yaochen Hu, Wei Guo, Yingxue Zhang, Hufeng Guo, Yong Liu, and Mark Coates. 2024. Enhancing Click-through Rate Prediction in Recommendation Domain with Search Query Representation. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 2462-2471." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.412, + 0.482, + 0.453 + ], + "angle": 0, + "content": "[38] Yujing Wang, Yingyan Hou, Haonan Wang, Ziming Miao, Shibin Wu, Qi Chen, Yuqing Xia, Chengmin Chi, Guoshuai Zhao, Zheng Liu, et al. 2022. A neural corpus indexer for document retrieval. Advances in Neural Information Processing Systems 35 (2022), 25600-25614." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.453, + 0.482, + 0.502 + ], + "angle": 0, + "content": "[39] Yu Wang, Zhengyang Wang, Hengrui Zhang, Qingyu Yin, Xianfeng Tang, Yinghan Wang, Danqing Zhang, Limeng Cui, Monica Cheng, Bing Yin, et al. 2023. Exploiting intent evolution in e-commercial query recommendation. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 5162-5173." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.503, + 0.482, + 0.543 + ], + "angle": 0, + "content": "[40] Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2024. C-pack: Packed resources for general chinese embeddings. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 641-649." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.543, + 0.482, + 0.573 + ], + "angle": 0, + "content": "[41] Jiayi Xie, Shang Liu, Gao Cong, and Zhenzhong Chen. 2024. UnifiedSSR: A Unified Framework of Sequential Search and Recommendation. In Proceedings of the ACM on Web Conference 2024. 3410-3419." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.573, + 0.482, + 0.613 + ], + "angle": 0, + "content": "[42] Lee Xiong, Chenyan Xiong, Ye Li, Kwok-Fung Tang, Jialin Liu, Paul Bennett, Junaid Ahmed, and Arnold Overwijk. 2020. Approximate nearest neighbor negative contrastive learning for dense text retrieval. arXiv preprint arXiv:2007.00808 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.613, + 0.482, + 0.673 + ], + "angle": 0, + "content": "[43] Jing Yao, Zhicheng Dou, Ruobing Xie, Yanxiong Lu, Zhiping Wang, and Ji-Rong Wen. 2021. USER: A Unified Information Search and Recommendation Model Based on Integrated Behavior Sequence. In Proceedings of the 30th ACM International Conference on Information J& Knowledge Management (Virtual Event, Queensland, Australia) (CIKM '21). Association for Computing Machinery, New York, NY, USA, 2373-2382." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.673, + 0.482, + 0.723 + ], + "angle": 0, + "content": "[44] Zheng Yuan, Fajie Yuan, Yu Song, Youhua Li, Junchen Fu, Fei Yang, Yunzhu Pan, and Yongxin Ni. 2023. Where to go next for recommender systems? id-vs. modality-based recommender models revisited. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2639-2649." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.723, + 0.482, + 0.763 + ], + "angle": 0, + "content": "[45] Zhenrui Yue, Yueqi Wang, Zhankui He, Huimin Zeng, Julian McAuley, and Dong Wang. 2024. Linear recurrent units for sequential recommendation. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining. 930-938." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.763, + 0.482, + 0.805 + ], + "angle": 0, + "content": "[46] Hamed Zamani and W. Bruce Croft. 2018. Joint Modeling and Optimization of Search and Recommendation. In Proceedings of the First Biennial Conference on Design of Experimental Search & Information Retrieval Systems, Bertinoro, Italy, August 28-31, 2018 (CEUR Workshop Proceedings, Vol. 2167). CEUR-WS.org, 36-41." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.805, + 0.482, + 0.845 + ], + "angle": 0, + "content": "[47] Hamed Zamani and W. Bruce Croft. 2020. Learning a Joint Search and Recommendation Model from User-Item Interactions. In Proceedings of the 13th International Conference on Web Search and Data Mining (Houston, TX, USA) (WSDM '20). Association for Computing Machinery, New York, NY, USA, 717–725." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.845, + 0.482, + 0.876 + ], + "angle": 0, + "content": "[48] Changshuo Zhang, Teng Shi, Xiao Zhang, Qi Liu, Ruobing Xie, Jun Xu, and Ji-Rong Wen. 2024. Modeling Domain and Feedback Transitions for Cross-Domain Sequential Recommendation. arXiv preprint arXiv:2408.08209 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.876, + 0.482, + 0.896 + ], + "angle": 0, + "content": "[49] Changshuo Zhang, Teng Shi, Xiao Zhang, Yanping Zheng, Ruobing Xie, Qi Liu, Jun Xu, and Ji-Rong Wen. 2024. QAGCF: Graph Collaborative Filtering for Q&A" + }, + { + "type": "list", + "bbox": [ + 0.086, + 0.109, + 0.482, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.11, + 0.81, + 0.12 + ], + "angle": 0, + "content": "Recommendation. arXiv preprint arXiv:2406.04828 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.12, + 0.914, + 0.15 + ], + "angle": 0, + "content": "[50] Changshuo Zhang, Xiao Zhang, Teng Shi, Jun Xu, and Ji-Rong Wen. 2025. Test-Time Alignment for Tracking User Interest Shifts in Sequential Recommendation. arXiv:2504.01489 [cs.IR] https://arxiv.org/abs/2504.01489" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.15, + 0.914, + 0.19 + ], + "angle": 0, + "content": "[51] Kepu Zhang, Teng Shi, Sunhao Dai, Xiao Zhang, Yinfeng Li, Jing Lu, Xiaoxue Zang, Yang Song, and Jun Xu. 2024. SAQRec: Aligning Recommender Systems to User Satisfaction via Questionnaire Feedback. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3165-3175." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.19, + 0.914, + 0.222 + ], + "angle": 0, + "content": "[52] Xiao Zhang, Teng Shi, Jun Xu, Zhenhua Dong, and Ji-Rong Wen. 2024. Model-Agnostic Causal Embedding Learning for Counterfactually Group-Fair Recommendation. IEEE Transactions on Knowledge and Data Engineering (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.222, + 0.914, + 0.271 + ], + "angle": 0, + "content": "[53] Yuting Zhang, Yiqing Wu, Ruidong Han, Ying Sun, Yongchun Zhu, Xiang Li, Wei Lin, Fuzhen Zhuang, Zhulin An, and Yongjun Xu. 2024. Unified Dual-Intent Translation for Joint Modeling of Search and Recommendation. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6291-6300." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.271, + 0.914, + 0.322 + ], + "angle": 0, + "content": "[54] Kai Zhao, Yukun Zheng, Tao Zhuang, Xiang Li, and Xiaoyi Zeng. 2022. Joint Learning of E-Commerce Search and Recommendation with a Unified Graph Neural Network. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining (Virtual Event, AZ, USA) (WSDM '22). Association for Computing Machinery, New York, NY, USA, 1461–1469." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.322, + 0.914, + 0.352 + ], + "angle": 0, + "content": "[55] Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.352, + 0.914, + 0.393 + ], + "angle": 0, + "content": "[56] Bowen Zheng, Yupeng Hou, Hongyu Lu, Yu Chen, Wayne Xin Zhao, Ming Chen, and Ji-Rong Wen. 2024. Adapting large language models by integrating collaborative semantics for recommendation. In 2024 IEEE 40th International Conference on Data Engineering (ICDE). IEEE, 1435-1448." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.393, + 0.914, + 0.432 + ], + "angle": 0, + "content": "[57] Kun Zhou, Hui Yu, Wayne Xin Zhao, and Ji-Rong Wen. 2022. Filter-Enhanced MLP is All You Need for Sequential Recommendation. 
In Proceedings of the ACM Web Conference 2022 (Virtual Event, Lyon, France) (WWW'22). Association for Computing Machinery, New York, NY, USA, 2388-2399." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.432, + 0.914, + 0.462 + ], + "angle": 0, + "content": "[58] Yujia Zhou, Jing Yao, Ledell Wu, Zhicheng Dou, and Ji-Rong Wen. 2023. WebUltron: An Ultimate Retriever on Webpages Under the Model-Centric Paradigm. IEEE Transactions on Knowledge and Data Engineering (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.462, + 0.914, + 0.503 + ], + "angle": 0, + "content": "[59] Shengyao Zhuang, Houxing Ren, Linjun Shou, Jian Pei, Ming Gong, Guido Zuccon, and Daxin Jiang. 2022. Bridging the gap between indexing and retrieval for differentiable search index with query generation. arXiv preprint arXiv:2206.10128 (2022)." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.11, + 0.914, + 0.503 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_origin.pdf b/data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..327686dc9493f36e76fcd0c97b80148c8d6e82a6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5357ff8ca3ee05fee52620a6ec58638c9783a371aa7cd61af3961f535d980137 +size 953948 diff --git a/data/2025/2504_05xxx/2504.05730/full.md b/data/2025/2504_05xxx/2504.05730/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0de1503adc59253822ca2c9013c26014673dfbb7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/full.md @@ -0,0 +1,508 @@ +# Unified Generative Search and Recommendation + +Teng Shi + +Renmin University of China + +Beijing, China + +shiteng@ruc.edu.cn + +Jun Xu* + +Xiao Zhang + +Renmin University of China + +Beijing, China + +{junxu,zhangx89}@ruc.edu.cn + +Xiaoxue Zang + +Kai Zheng + +Kuaishou Technology Co., Ltd. + +Beijing, China + +xxic666@126.com + +zhengk92@gmail.com + +Yang Song + +Kuaishou Technology Co., Ltd. + +Beijing, China + +ys@sonyis.me + +Enyun Yu + +Independent + +Beijing, China + +yuenyun@126.com + +# Abstract + +Modern commercial platforms typically offer both search and recommendation functionalities to serve diverse user needs, making joint modeling of these tasks an appealing direction. While prior work has shown that integrating search and recommendation can be mutually beneficial, it also reveals a performance trade-off: enhancements in one task often come at the expense of the other. This challenge arises from their distinct information requirements: search emphasizes semantic relevance between queries and items, whereas recommendation depends more on collaborative signals among users and items. Effectively addressing this trade-off requires tackling two key problems: (1) integrating both semantic and collaborative signals into item representations, and (2) guiding the model to distinguish and adapt to the unique demands of search and recommendation. The emergence of generative retrieval with Large Language Models (LLMs) presents new possibilities. This paradigm encodes items as identifiers and frames both search and recommendation as sequential generation tasks, offering the flexibility to leverage multiple identifiers and task-specific prompts. 
In light of this, we introduce GenSAR, a unified generative framework for balanced search and recommendation. Our approach designs dual-purpose identifiers and tailored training strategies to incorporate complementary signals and align with task-specific objectives. Experiments on both public and commercial datasets demonstrate that GenSAR effectively reduces the trade-off and achieves state-of-the-art performance on both tasks.

# CCS Concepts

- Information systems $\rightarrow$ Recommender systems; Personalization.

*Corresponding authors. Work partially done at Engineering Research Center of Next-Generation Intelligent Search and Recommendation, Ministry of Education. Work done when Teng Shi was an intern at Kuaishou.

# Keywords

Recommendation; Search; Large Language Model

# 1 Introduction

To facilitate the diverse ways of information access, many commercial platforms, such as e-commerce, video, and music platforms, offer both search [2, 3, 6, 7] and recommendation [34, 48-52] (S&R) services. This provides an opportunity for joint modeling of S&R, enabling better user interest modeling and enhancing the performance of both tasks.

Many studies have explored joint modeling of S&R, including leveraging recommendation to enhance search [2, 3, 6, 7], using search to enhance recommendation [15, 30, 31, 37], and unified S&R modeling [29, 41, 43, 46, 47]. Although these studies have demonstrated that S&R can mutually enhance each other, they have also identified a trade-off when the model serves both tasks simultaneously [29]. Specifically, when the recommendation performance improves, the search performance tends to degrade, and vice versa. Empirical analysis of the representative methods JSR [46] and UniSAR [29] on an S&R dataset collected from a real commercial platform also confirmed the performance trade-off, as shown in Figure 1(a). For more details, please refer to Section 4.1.1.

Analysis also showed that the trade-off is rooted in the different information requirements of S&R. Search typically focuses more on the semantic relevance between queries and items, with traditional search models often based on pre-trained language models [18, 40, 42]. In contrast, recommendation heavily relies on collaborative information, where ID-based recommendation can yield excellent results [14, 19, 44].
Figure 1(b) shows an empirical validation comparing S&R performance with ID-only and Text-only embeddings. The ID embeddings are randomly initialized and trained, containing collaborative information, while the Text embeddings are trained with BGE [40] and then reduced to the same dimensionality as that of the ID embeddings, containing semantic information. From Figure 1(b), we found that recommendation relies more on collaborative information while search focuses more on semantic information.

![](images/628b138c6ab45d0641959ce993765f284c74574409b6b4abd42ddca7826121cf.jpg)
(a) Trade-off between S&R

![](images/0eb6ab1ddbe1b20f36c0374a24bd3c90cda43ce603162394fc84a167ceba717f.jpg)
(b) Performance of different embeddings
Figure 1: Empirical analysis on the Commercial dataset: (a) A trade-off between S&R is observed in representative joint S&R methods, JSR [46] and UniSAR [29]. (b) The performance of the sequential recommendation model SASRec [19] and the product search model QEM [2], using ID and text embeddings, respectively.

Therefore, balancing the semantic information required for search and the collaborative information needed for recommendation becomes a key issue in joint S&R modeling. It is non-trivial and faces two major challenges: (1) How to incorporate both semantic and collaborative information in item representations. Existing joint S&R models typically assign a single representation to each item, making it difficult to capture both types of information effectively; (2) How to let the model understand the difference in information requirements of S&R during training. Current joint models often treat S&R tasks identically, without differentiating them during training. This makes it challenging for the model to grasp their distinct requirements.

Recently, Large Language Model (LLM) [55]-based generative retrieval for search [35, 59] and recommendation [11, 26, 56] has garnered significant attention. This provides a solution to the aforementioned challenges: (1) Generative retrieval assigns an identifier (a sequence of tokens) to each item, allowing us to assign multiple identifiers to each item to balance semantic and collaborative information; (2) Generative retrieval formulates both S&R as sequence-to-sequence (Seq2Seq) tasks, enabling the unification of different S&R tasks and helping the model better understand the distinct requirements of each task.

Based on this, we propose GenSAR, which unifies balanced search and recommendation through generative retrieval, thereby alleviating the trade-off between S&R to better enhance each other. Firstly, we design a joint S&R identifier that integrates both semantic and collaborative information. Building on the RQ-VAE [26, 56] method, we employ shared codebooks for both semantic and collaborative information, alongside specific codebooks for each. As a result, items from search are represented by semantic codes, while items from recommendation are represented by collaborative codes. These two codes share a common portion to capture shared information while also retaining distinct parts to preserve the unique characteristics of semantic and collaborative information. Secondly, we design the joint S&R training tasks. We prepend a token representing the behavior type to the item identifier and then input the user's S&R history into the LLM (with the user query also provided for search).
Different prompts are used to guide LLMs to predict the next recommended item, the next searched query, and the next searched item, enabling the model to understand the distinct requirements for S&R.

The major contributions of the paper are summarized as follows:

- We verified the existence of the trade-off between S&R, and identified that this trade-off arises from the different information requirements of S&R. Additionally, we have analyzed the challenges in balancing semantic and collaborative information needed for S&R.

- We propose GenSAR, which unifies balanced S&R through generative retrieval. We designed a joint S&R identifier to balance semantic and collaborative information, and developed joint training tasks to help the model understand the different requirements of each task.

- Experimental results on two datasets validate the effectiveness of GenSAR. GenSAR not only surpasses traditional S&R models but also outperforms generative S&R models.

# 2 Related Work

Joint Search and Recommendation. Joint modeling of S&R has attracted increasing attention in recent years and can be broadly categorized into three types: (1) Enhancing search with recommendation [2, 3, 6, 7], such as TEM [6], which uses Transformers to model user preferences, and CoPPS [7], which applies contrastive learning to address data sparsity. (2) Enhancing recommendation with search [15, 30, 31, 37], e.g., SESRec [31], which disentangles similar and dissimilar interests from both histories. (3) Unified modeling of S&R [29, 41, 43, 46, 47, 53, 54], such as JSR [46, 47] with joint loss and UniSAR [29], which models behavior transitions. While these works show mutual benefits between S&R, they also reveal a trade-off [28, 29]. This paper addresses that trade-off within a generative retrieval framework.

Generative Search and Recommendation. With the rise of Large Language Models (LLMs) [55], LLM-based generative retrieval has been widely explored for both search [5, 21, 33, 35, 38, 58, 59] and recommendation [11, 17, 25, 26, 56]. These methods represent items as identifiers and input the user query (for search) or user history (for recommendation) into the LLM to generate the target item. Identifier designs can be grouped into: (1) Text-based, using item titles [8, 23] or substrings [5, 22]; (2) Non-learnable ID-based, with early methods assigning random IDs [11], and later ones using clustering to encode semantic or collaborative structure [17, 35, 38]; (3) Learnable codebook-based, applying techniques like RQ-VAE [26, 56] to learn identifiers from semantic or collaborative embeddings. However, most existing approaches design identifiers tailored to either search or recommendation, focusing solely on semantic or collaborative information. In joint S&R, balancing both is essential for strong performance across tasks.

# 3 Our Approach

This section introduces our proposed method, GenSAR. Section 3.1 defines the Joint Search and Recommendation task. Section 3.2 presents the Joint Identifier module, where we design separate semantic and collaborative identifiers to balance the different needs of search and recommendation. Section 3.3 describes task-specific training objectives to help the model capture both types of information. Finally, Section 3.4 details the training and inference process of GenSAR.

![](images/757f11261ee325c692b5f00503a37aa68878e51f8dce743a82fe3fffcbf18636.jpg)
Figure 2: The joint search and recommendation identifier.
We extract the semantic and collaborative embeddings for each item. These two embeddings are first concatenated and passed through the shared codebooks to learn shared codes. Then, the semantic and collaborative embeddings are separately processed through specific codebooks to learn specific codes. Finally, these codes are concatenated to form two identifiers for each item: one for semantics and one for collaboration.

# 3.1 Problem Formulation

Let $\mathcal{U},\mathcal{V},\mathcal{Q}$ denote the sets of users, items, and queries, respectively. Each user $u\in \mathcal{U}$ has a chronologically ordered interaction history $S_{u} = [(b_{1},x_{1}),(b_{2},x_{2}),\ldots,(b_{N},x_{N})]$ that includes her historical S&R behaviors, where $N$ denotes the number of $u$'s historical behaviors. $b_{i}\in \{\langle \mathrm{R_I}\rangle, \langle \mathrm{S_Q}\rangle, \langle \mathrm{S_I}\rangle\}$ represents the type of the $i$-th behavior: $\langle \mathrm{R_I}\rangle$ indicates an item clicked by the user after a recommendation, $\langle \mathrm{S_Q}\rangle$ represents a query searched by the user, and $\langle \mathrm{S_I}\rangle$ denotes an item clicked by the user after searching a query. $x_{i}$ denotes the $i$-th behavior:

$$
x_i = \begin{cases} v_i, & \text{if } b_i = \langle \mathrm{R_I}\rangle \text{ or } b_i = \langle \mathrm{S_I}\rangle, \\ q_i, & \text{if } b_i = \langle \mathrm{S_Q}\rangle, \end{cases} \tag{1}
$$

where $v_{i} \in \mathcal{V}$ denotes the $i$-th interacted item and $q_{i} \in \mathcal{Q}$ is the $i$-th searched query. Our goal is to enable the model to understand user interests and predict the next item $v_{N+1}$ for search when $b_{N+1} = \langle \mathrm{S_I}\rangle$ or recommendation when $b_{N+1} = \langle \mathrm{R_I}\rangle$.

# 3.2 Joint Search and Recommendation Identifier

This section introduces the design of the joint S&R identifier (Figure 2). We first extract semantic and collaborative embeddings for each item. Using RQ-VAE [20, 26, 56], we apply both shared and separate codebooks to learn two identifiers per item—one semantic, one collaborative. The identifiers share common parts to capture shared information, while retaining unique parts to reflect task-specific features.

3.2.1 Embedding Extraction. For each item $v \in \mathcal{V}$, we can input its textual information, such as the title and description, into a pre-trained retrieval model (e.g., BERT [10], BGE [40]) to obtain an embedding $\mathbf{v}_s \in \mathbb{R}^{d_s}$ that contains its semantic information. Meanwhile, we can also obtain an embedding $\mathbf{v}_c \in \mathbb{R}^{d_c}$ containing its collaborative information from a pre-trained recommendation model (e.g., SASRec [19], BERT4Rec [32]). $d_{s}$ and $d_{c}$ represent the dimensions of the semantic and collaborative embeddings, respectively. We map the semantic and collaborative embeddings to the same-dimensional latent space using two encoders:

$$
\mathbf{z}_s = \mathrm{Encoder}_s(\mathbf{v}_s), \quad \mathbf{z}_c = \mathrm{Encoder}_c(\mathbf{v}_c), \tag{2}
$$

where $\mathbf{z}_s \in \mathbb{R}^d$, $\mathbf{z}_c \in \mathbb{R}^d$, and $d$ is the dimension of the latent embeddings; $\mathrm{Encoder}_s(\cdot)$ and $\mathrm{Encoder}_c(\cdot)$ are two MLPs (Multilayer Perceptrons).
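To make Eq. (2) concrete, here is a minimal PyTorch sketch of the two encoders. The layer widths, embedding dimensions, and batch shapes are illustrative assumptions, not the paper's actual configuration:

```python
import torch
import torch.nn as nn

class EmbeddingEncoder(nn.Module):
    """MLP that maps a pre-trained item embedding (semantic or
    collaborative) into the shared d-dimensional latent space of Eq. (2)."""
    def __init__(self, in_dim: int, latent_dim: int, hidden_dim: int = 512):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, latent_dim),
        )

    def forward(self, v: torch.Tensor) -> torch.Tensor:
        return self.mlp(v)

# Assumed dimensions: 768-dim text embeddings (e.g., from a BGE-style model)
# and 64-dim collaborative embeddings (e.g., from SASRec), mapped to d = 128.
encoder_s = EmbeddingEncoder(in_dim=768, latent_dim=128)
encoder_c = EmbeddingEncoder(in_dim=64, latent_dim=128)
v_s, v_c = torch.randn(32, 768), torch.randn(32, 64)
z_s, z_c = encoder_s(v_s), encoder_c(v_c)  # Eq. (2)
r0_m = torch.cat([z_s, z_c], dim=-1)       # r_0^m = [z_s; z_c], fed to the shared codebooks
```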
3.2.2 Residual Quantization. To integrate both semantic and collaborative information, we use $L_{m}$-level shared codebooks, along with $L_{n}$-level specific codebooks for semantic and collaborative information, respectively. First, the latent embeddings for semantic and collaborative information, $\mathbf{z}_s$ and $\mathbf{z}_c$, are concatenated to form $\mathbf{r}_0^m = [\mathbf{z}_s; \mathbf{z}_c] \in \mathbb{R}^{2d}$. This $\mathbf{r}_0^m$ is then passed through the $L_{m}$-level shared codebooks to obtain the shared codes $I_m$ and the residual embedding $\mathbf{r}_{L_m}^m$. Then, we extract the semantic part $\mathbf{r}_0^s = \mathbf{r}_{L_m}^m[1:d] \in \mathbb{R}^d$ and the collaborative part $\mathbf{r}_0^c = \mathbf{r}_{L_m}^m[d:2d] \in \mathbb{R}^d$ from $\mathbf{r}_{L_m}^m$, and input them separately into the semantic and collaborative codebooks to learn their specific codes $I_s$ and $I_c$, respectively. Finally, the shared and specific codes are concatenated, resulting in two identifiers, $I_{m+s}$ and $I_{m+c}$, for each item. Next, we introduce the residual quantization process for both the shared and specific codebooks.

- Shared Codebooks. We have $L_{m}$-level shared codebooks. At each level $i \in \{1, 2, \dots, L_{m}\}$, we have a shared codebook $C_{i}^{m} = \{\mathbf{e}_{k}\}_{k=1}^{K}$, where $K$ is the size of each codebook and $\mathbf{e}_{k} \in \mathbb{R}^{2d}$ is a learnable code embedding. The residual quantization process for the shared codebooks is as follows:

$$
c_i^m = \arg\min_k \|\mathbf{r}_{i-1}^m - \mathbf{e}_k\|_2^2, \quad \mathbf{e}_k \in C_i^m,
$$

$$
\mathbf{r}_i^m = \mathbf{r}_{i-1}^m - \mathbf{e}_{c_i^m}, \quad \mathbf{r}_0^m = [\mathbf{z}_s; \mathbf{z}_c] \in \mathbb{R}^{2d}, \tag{3}
$$

where $c_{i}^{m}$ is the assigned code from the $i$-th level of the shared codebook and $\mathbf{r}_{i-1}^{m}$ is the residual from the previous level. Through the recursive quantization in Eq. (3), we can obtain the shared codes $I_{m} = \left[c_{1}^{m}, c_{2}^{m}, \ldots, c_{L_{m}}^{m}\right]$ and the residual embedding $\mathbf{r}_{L_{m}}^{m}$.

- Specific Codebooks. We can extract the semantic part $\mathbf{r}_0^s = \mathbf{r}_{L_m}^m[1:d] \in \mathbb{R}^d$ and the collaborative part $\mathbf{r}_0^c = \mathbf{r}_{L_m}^m[d:2d] \in \mathbb{R}^d$ from the residual embedding $\mathbf{r}_{L_m}^m$ output by the shared codebooks. We then pass them separately through the $L_n$-level semantic and collaborative specific codebooks $C_i^s$ and $C_i^c$, where $i \in \{1, 2, \dots, L_n\}$. Please note that, unlike the shared codebooks, whose code embeddings are $2d$-dimensional, the code embeddings of the specific codebooks are $d$-dimensional.
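Before formalizing the specific codebooks, the following sketch illustrates the recursive quantization of Eq. (3) over the shared codebooks. The codebook size $K = 256$ and all tensor shapes are assumptions chosen for illustration:

```python
import torch
import torch.nn as nn

def residual_quantize(r: torch.Tensor, codebooks: nn.ModuleList):
    """Recursive residual quantization (Eq. (3)): at each level, pick the
    nearest code embedding and subtract it from the running residual."""
    codes, residual = [], r
    for codebook in codebooks:                          # each codebook: nn.Embedding(K, dim)
        dists = torch.cdist(residual, codebook.weight)  # distances to all K codes
        idx = dists.argmin(dim=-1)                      # c_i = argmin_k ||r_{i-1} - e_k||^2
        codes.append(idx)
        residual = residual - codebook.weight[idx]      # r_i = r_{i-1} - e_{c_i}
    return codes, residual

# L_m = 2 shared codebooks over the concatenated 2d-dimensional embedding.
d, K = 128, 256
shared_codebooks = nn.ModuleList([nn.Embedding(K, 2 * d) for _ in range(2)])
r0_m = torch.randn(32, 2 * d)
I_m, r_Lm = residual_quantize(r0_m, shared_codebooks)
# Split the remaining residual into its semantic and collaborative halves,
# which seed the specific codebooks described next.
r0_s, r0_c = r_Lm[:, :d], r_Lm[:, d:]
```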
The residual quantization process for the specific codebooks can be formulated as follows:

$$
c_i^s = \arg\min_k \|\mathbf{r}_{i-1}^s - \mathbf{e}_k\|_2^2, \quad \mathbf{e}_k \in C_i^s,
$$

$$
c_i^c = \arg\min_k \|\mathbf{r}_{i-1}^c - \mathbf{e}_k\|_2^2, \quad \mathbf{e}_k \in C_i^c, \tag{4}
$$

$$
\mathbf{r}_i^s = \mathbf{r}_{i-1}^s - \mathbf{e}_{c_i^s}, \quad \mathbf{r}_i^c = \mathbf{r}_{i-1}^c - \mathbf{e}_{c_i^c},
$$

where $c_i^s$ and $c_i^c$ represent the codes assigned by the $i$-th level semantic-specific and collaborative-specific codebooks, respectively. Through the recursive quantization in Eq. (4), we can obtain the semantic-specific and collaborative-specific codes as follows:

$$
I_s = \left[c_1^s, c_2^s, \dots, c_{L_n}^s\right], \quad I_c = \left[c_1^c, c_2^c, \dots, c_{L_n}^c\right].
$$

Finally, by concatenating the shared codes and the specific codes, we can obtain the semantic identifier $I_{m+s}$ and collaborative identifier $I_{m+c}$ for item $v$:

$$
I_{m+s} = \left[c_1^m, c_2^m, \dots, c_{L_m}^m, c_1^s, c_2^s, \dots, c_{L_n}^s\right],
$$

$$
I_{m+c} = \left[c_1^m, c_2^m, \dots, c_{L_m}^m, c_1^c, c_2^c, \dots, c_{L_n}^c\right]. \tag{5}
$$

3.2.3 Identifier Training. After passing through the shared and specific codebooks, we can obtain the semantic and collaborative quantized embeddings as follows:

$$
\hat{\mathbf{z}}_s = \sum_{i=1}^{L_m} \mathbf{e}_{c_i^m}[1:d] + \sum_{i=1}^{L_n} \mathbf{e}_{c_i^s}, \quad \hat{\mathbf{z}}_c = \sum_{i=1}^{L_m} \mathbf{e}_{c_i^m}[d:2d] + \sum_{i=1}^{L_n} \mathbf{e}_{c_i^c}, \tag{6}
$$

where $\mathbf{e}_{c_i^m} \in \mathbb{R}^{2d}$ is the code embedding of the shared codebooks, and $\mathbf{e}_{c_i^s} \in \mathbb{R}^d$ and $\mathbf{e}_{c_i^c} \in \mathbb{R}^d$ are the code embeddings of the semantic and collaborative specific codebooks. The quantized semantic embedding $\hat{\mathbf{z}}_s \in \mathbb{R}^d$ and collaborative embedding $\hat{\mathbf{z}}_c \in \mathbb{R}^d$ are used to reconstruct the original semantic and collaborative embeddings, $\mathbf{v}_s$ and $\mathbf{v}_c$:

$$
\hat{\mathbf{v}}_s = \mathrm{Decoder}_s(\hat{\mathbf{z}}_s), \quad \hat{\mathbf{v}}_c = \mathrm{Decoder}_c(\hat{\mathbf{z}}_c), \tag{7}
$$

where $\mathrm{Decoder}_s(\cdot)$ and $\mathrm{Decoder}_c(\cdot)$ are two MLPs. We can compute the reconstruction loss used for training the encoder and decoder as follows:

$$
\mathcal{L}_{\mathrm{Recon}} = \left\|\mathbf{v}_s - \hat{\mathbf{v}}_s\right\|_2^2 + \left\|\mathbf{v}_c - \hat{\mathbf{v}}_c\right\|_2^2. \tag{8}
$$

![](images/6f6f1922647d9249aee68c7d995b2b82222e7b5d1b014f595f282014536b00fc.jpg)

![](images/9edd3d9b02c30b3b1ca9e7ca1a76fa2f37f746d23bd32759acf9aefd1337c887.jpg)
Figure 3: Training and Inference Process of GenSAR. During training, we provide the LLM with different instructions to generate corresponding responses.
During inference, we append a token at the end of the instruction to indicate the type of behavior to be predicted, enabling the LLM to be applied to either search or recommendation tasks.

We can also compute the loss for residual quantization as follows:

$$
\mathcal{L}_{\mathrm{RQ}}^m = \sum_{i=1}^{L_m} \|\mathrm{sg}[\mathbf{r}_{i-1}^m] - \mathbf{e}_{c_i^m}\|_2^2 + \alpha \|\mathbf{r}_{i-1}^m - \mathrm{sg}[\mathbf{e}_{c_i^m}]\|_2^2,
$$

$$
\mathcal{L}_{\mathrm{RQ}}^s = \sum_{i=1}^{L_n} \|\mathrm{sg}[\mathbf{r}_{i-1}^s] - \mathbf{e}_{c_i^s}\|_2^2 + \alpha \|\mathbf{r}_{i-1}^s - \mathrm{sg}[\mathbf{e}_{c_i^s}]\|_2^2, \tag{9}
$$

$$
\mathcal{L}_{\mathrm{RQ}}^c = \sum_{i=1}^{L_n} \|\mathrm{sg}[\mathbf{r}_{i-1}^c] - \mathbf{e}_{c_i^c}\|_2^2 + \alpha \|\mathbf{r}_{i-1}^c - \mathrm{sg}[\mathbf{e}_{c_i^c}]\|_2^2,
$$

$$
\mathcal{L}_{\mathrm{RQ}} = \mathcal{L}_{\mathrm{RQ}}^m + \mathcal{L}_{\mathrm{RQ}}^s + \mathcal{L}_{\mathrm{RQ}}^c,
$$

where $\mathrm{sg}[\cdot]$ denotes the stop-gradient operation and $\alpha$ is a hyperparameter. $\mathcal{L}_{\mathrm{RQ}}$ is used to train the code embeddings in both the shared and specific codebooks. Finally, the total loss for training the identifier is as follows:

$$
\mathcal{L}_{\mathrm{RQ\text{-}VAE}} = \mathcal{L}_{\mathrm{Recon}} + \mathcal{L}_{\mathrm{RQ}}. \tag{10}
$$

3.2.4 Behavior-aware Identifier. After learning the semantic and collaborative identifiers for each item, we can represent each user interaction $(b_{i},x_{i})$ as shown in Eq. (1). To help the model understand different behaviors in the user's interaction history, we prepend a token indicating the behavior type to each interaction's identifier. For interactions involving items, we prepend the corresponding behavior token to the identifier of each item. For interactions involving queries, we prepend the behavior token to the word sequence of the query. This can be formulated as follows:

$$
\mathrm{ID}(b_i, x_i) = \begin{cases} \left[\langle \mathrm{R_I}\rangle, c_1^m, c_2^m, \dots, c_{L_m}^m, c_1^c, c_2^c, \dots, c_{L_n}^c\right], & \text{if } b_i = \langle \mathrm{R_I}\rangle, \\ \left[\langle \mathrm{S_Q}\rangle, w_1, w_2, \dots, w_{|q_i|}\right], & \text{if } b_i = \langle \mathrm{S_Q}\rangle, \\ \left[\langle \mathrm{S_I}\rangle, c_1^m, c_2^m, \dots, c_{L_m}^m, c_1^s, c_2^s, \dots, c_{L_n}^s\right], & \text{if } b_i = \langle \mathrm{S_I}\rangle, \end{cases} \tag{11}
$$

where $[w_{1},w_{2},\dots,w_{|q_{i}|}]$ are the words of query $q_{i}$, and $\mathrm{ID}(\cdot)$ denotes the function for obtaining the identifier of each interaction.

# 3.3 Joint Search and Recommendation Training

To better adapt the LLM to joint S&R tasks, we design training objectives that help it understand user behaviors and effectively learn both semantic and collaborative identifiers.
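Since all of the following training samples are built from the behavior-aware identifiers of Eq. (11), here is a minimal sketch of how such an identifier can be assembled into a token sequence. The helper name and the concrete code tokens are hypothetical:

```python
def interaction_to_tokens(behavior, shared_codes, semantic_codes=None,
                          collab_codes=None, query_words=None):
    """Assemble the behavior-aware identifier of Eq. (11): a behavior token
    followed by the item's codes (or the query's words for <S_Q>)."""
    if behavior == "<R_I>":   # recommendation click -> collaborative identifier
        return [behavior] + shared_codes + collab_codes
    if behavior == "<S_I>":   # search click -> semantic identifier
        return [behavior] + shared_codes + semantic_codes
    if behavior == "<S_Q>":   # issued query -> its word sequence
        return [behavior] + list(query_words)
    raise ValueError(f"unknown behavior type: {behavior}")

# A searched-and-clicked item uses the shared codes plus the semantic-specific codes.
print(interaction_to_tokens("<S_I>",
                            shared_codes=["<M_1_247>", "<M_2_197>"],
                            semantic_codes=["<S_1_184>", "<S_2_110>"]))
# ['<S_I>', '<M_1_247>', '<M_2_197>', '<S_1_184>', '<S_2_110>']
```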
3.3.1 Next Recommendation Item Prediction. To enable the LLM to perform well on the recommendation task, we let it predict the next recommended item. Unlike previous generative recommendation models [11, 26, 56] that only use the user's recommendation history, our approach incorporates search history as well. This allows the LLM to better leverage the user's historical information and understand the relationship between S&R behaviors. A sample of the data is shown below:

# Next Recommendation Item Prediction

Instruction: Below is the user's interaction history: `<S_Q>` Piano; `<S_I>` `<M_1_247>` `<M_2_197>` `<S_1_184>` `<S_2_110>`; ...; `<R_I>`. Please recommend the next item the user is likely to click.

Response: `<R_I>` `<M_1_10>` `<M_2_25>` `<R_1_52>` `<R_2_37>`

Here, `<M_1_10>` `<M_2_25>` represents the shared semantic and collaborative identifier of the item, `<S_1_184>` `<S_2_110>` represents the semantic-specific identifier, and `<R_1_52>` `<R_2_37>` represents the collaborative-specific identifier.

3.3.2 Next Search Query Prediction. Some works focus on query recommendation [4, 12, 39], where they predict the next query a user is likely to search. Since our user interaction history also includes search queries, we introduce a task that allows the LLM to predict the user's next intended search query based on their history. This helps the model better understand user search intent and the relationship between S&R behaviors. A sample of the data for this task is as follows:

# Next Search Query Prediction

Instruction: Below is the user's interaction history: `<R_I>` `<M_1_199>` `<M_2_175>` `<R_1_1>` `<R_2_44>`; `<R_I>` `<M_1_209>` `<M_2_235>`; ...; `<M_2_68>` `<R_1_118>` `<R_2_85>`. Please predict the next query the user might want to search.

Response: `<S_Q>` Artificial Intelligence

3.3.3 Next Search Item Prediction. To enable the model to perform well on the search task, we have it predict the next search item. Previous generative search models [35, 59] only input the user's query into the LLM to predict the target item, which considers only the correlation between the query and the item, without taking the user's preferences into account. To address this, we include the user's S&R history in the input to reflect their preferences. A sample of the data for this task is as follows:

# Next Search Item Prediction

Instruction: Below is the user's interaction history: `<R_I>` `<M_1_199>` `<M_2_175>` `<R_1_1>` `<R_2_44>`; `<R_I>` `<M_1_209>` `<M_2_235>`; ...; `<M_2_68>` `<R_1_118>` `<R_2_85>`. The user's search query is `<S_Q>` Artificial Intelligence. Please predict the next item the user might click.
Response: `<S_I>` `<M_1_23>` `<M_2_42>` `<S_1_126>` `<S_2_73>`

Here, "`<S_Q>` Artificial Intelligence" denotes the query that the user is currently searching for.

3.3.4 Identifier-Language Alignment. To enhance the LLM's understanding of both the collaborative and semantic identifiers of each item, we designed an identifier-language alignment task. This task enables the LLM to generate a corresponding description based on an item's identifier and, conversely, to generate the appropriate identifier from the item's description.

First, we have the Desc2ID task, which enables the LLM to generate the corresponding item identifier based on its description.

# Desc2ID

Instruction: Using the provided description "Apple MacBook Air", predict the corresponding item.

Response: `<M_1_135>` `<M_2_19>` `<S_1_41>` `<S_2_65>`

Then, we have the ID2Desc task, which enables the LLM to generate the corresponding item description based on its identifier.

# ID2Desc

Instruction: Please provide a description for the item `<M_1_135>` `<M_2_19>` `<S_1_41>` `<S_2_65>`.

Response: Apple MacBook Air.

Please note that for both semantic and collaborative identifiers, we include the Desc2ID and ID2Desc training tasks. Since the input and output of these two tasks do not involve user history, we do not prepend a token indicating the behavior type to the identifier.

# 3.4 Training and Inference

This section introduces how to train the LLM for joint S&R, and how to use the trained LLM during inference to generate the target item for either the search or recommendation task. The training and inference process of GenSAR is shown in Figure 3.

3.4.1 Training. As previously mentioned, each interaction in the user's history is represented as an identifier, allowing us to formulate the task as a sequence-to-sequence problem. We train the model using next-token prediction, optimizing the negative log-likelihood of generating the target as follows:

$$
\mathcal{L} = -\sum_{t=1}^{T} \log P\left(y_t \mid y_{<t}, \mathrm{Ins}\right), \tag{12}
$$

where $y$ represents the behavior-aware identifier of the target to be predicted, as defined in Eq. (11), $T$ is the length of the identifier of the target item, and Ins refers to the various instructions described in Section 3.3, which are used as inputs for the LLM.

Table 1: Comparison of different generative search or recommendation methods. "S." and "R." denote search and recommendation, respectively.
| Methods | Scale | Backbone | Task: S. | Task: R. | Identifier: Semantic | Identifier: Collaborative |
| --- | --- | --- | --- | --- | --- | --- |
| P5 [11, 17] | 60M/220M | T5-small/T5-base | ✗ | ✓ | ✗ | ✓ |
| TIGER [26] | 60M | T5-small | ✗ | ✓ | ✓ | ✗ |
| LC-Rec [56] | 7B | LLaMA | ✗ | ✓ | ✓ | ✗ |
| DSI-QG [59] | 220M | T5-base | ✓ | ✗ | ✓ | ✗ |
| WebUltron [58] | 220M | T5-base | ✓ | ✗ | ✓ | ✗ |
| GenRet [33] | 220M | T5-base | ✓ | ✗ | ✓ | ✗ |
| GenSAR (Ours) | 60M | T5-small | ✓ | ✓ | ✓ | ✓ |
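As a concrete reference for the objective in Eq. (12), the sketch below computes the summed negative log-likelihood over the target identifier tokens. The vocabulary size and sequence length are toy assumptions:

```python
import torch
import torch.nn.functional as F

def seq2seq_nll(logits: torch.Tensor, target_ids: torch.Tensor) -> torch.Tensor:
    """Eq. (12): -sum_t log P(y_t | y_<t, Ins), where logits are the decoder
    outputs at each step and target_ids are the gold identifier tokens."""
    log_probs = F.log_softmax(logits, dim=-1)                # (T, vocab)
    return -log_probs.gather(-1, target_ids.unsqueeze(-1)).sum()

# Toy example: a 5-token behavior-aware identifier over a 1,000-token vocabulary.
T, vocab = 5, 1000
loss = seq2seq_nll(torch.randn(T, vocab), torch.randint(0, vocab, (T,)))
```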
3.4.2 Inference. During training, we train the LLM according to the input-output format described in Section 3.3. During inference, to apply the LLM to search and recommendation tasks, we append a behavior token, either "$\langle \mathrm{S_I}\rangle$" for search or "$\langle \mathrm{R_I}\rangle$" for recommendation, to the input of the LLM to prompt it to generate the corresponding next item for search or recommendation, respectively. The other tasks mentioned in Section 3.3 are used as auxiliary tasks during training to help the model better understand user S&R behaviors. During generation, to ensure that the items generated by the LLM are within the candidate set, we follow previous works [17, 56] and use constrained beam search.

# 3.5 Discussion

As shown in Table 1, we compare GenSAR with various generative search or recommendation methods in terms of scale (number of parameters), backbone architecture, and applicable tasks. GenSAR adopts T5-small as its backbone, resulting in a relatively small number of parameters while being capable of serving both S&R tasks. Compared with existing methods, it achieves a favorable balance between efficiency and effectiveness.

In terms of novelty, unlike existing methods that focus solely on either semantic or collaborative information in identifier design, our approach incorporates both the semantic information required for search and the collaborative signals essential for recommendation. This joint consideration helps alleviate the trade-off between S&R.

# 4 Experiments

We conducted experiments to evaluate the performance of GenSAR.

# 4.1 Experimental Setup

4.1.1 Dataset. We conducted experiments on the following datasets: (1) Amazon [13, 24]: Following previous works [2, 3, 29, 31], we use the semi-synthetic dataset based on Amazon recommendation data as the public dataset for our experiments. (2) Commercial: To thoroughly evaluate the effectiveness of GenSAR, we collected a dataset from a Chinese commercial app, containing S&R interactions from 10,000 users over two weeks. For details on data processing and train/validation/test splitting, please see the code link.

Table 2: Statistics of the datasets used in this paper. "S" and "R" denote search and recommendation, respectively.
| Dataset | #Users | #Items | #Queries | #Interaction-R | #Interaction-S |
| --- | --- | --- | --- | --- | --- |
| Amazon | 192,403 | 62,883 | 983 | 1,266,903 | 1,081,934 |
| Commercial | 10,000 | 782,225 | 135,206 | 4,286,866 | 383,465 |
4.1.2 Baselines. In this work, we use the following representative methods as baselines for comparison with GenSAR.

First, we compare with the following recommendation models: (1) Sequential Recommendation: GRU4Rec [16]; SASRec [19]; FMLP-Rec [57]; LRURec [45]. (2) Generative Recommendation: P5-CID [11, 17]; TIGER [26]; LC-Rec [56]. Next, we compare with the following search models: (1) Personalized Search: QEM [2]; TEM [6]; CoPPS [7]. (2) Dense Retrieval: E5 [36]; BGE [40]. (3) Generative Retrieval: DSI-QG [59]; WebUltron [58]; GenRet [33]. Finally, we compare with the following joint S&R models: JSR [46]; SESRec [31]; UnifiedSSR [41]; UniSAR [29]. For more details on the baselines, please see the code link.

4.1.3 Evaluation Metrics & Implementation Details. Following previous works [29, 31, 57], we use ranking metrics including top-$k$ Hit Ratio (HR) and top-$k$ Normalized Discounted Cumulative Gain (NDCG). We report results for $k \in \{1, 5, 10\}$; since NDCG@1 is identical to HR@1, we do not report it. For more details on the evaluation and model implementation, please see the code link.

# 4.2 Overall Performance

Table 3 and Table 4 show the S&R results on the two datasets, respectively. From the results, we can observe that:

- Firstly, compared to existing search or recommendation models, GenSAR achieves state-of-the-art results. This validates the effectiveness of GenSAR in alleviating the trade-off between S&R through generative retrieval, by designing joint identifiers and training tasks for both tasks.
- Secondly, most joint S&R methods (e.g., JSR, UniSAR, GenSAR) outperform traditional methods that use only item IDs, such as sequential recommendation (e.g., SASRec, FMLP-Rec) and personalized search methods (e.g., QEM, TEM, CoPPS). This demonstrates the advantage of joint modeling of S&R, as it enhances the performance of both tasks.
- Thirdly, for search, dense retrieval (e.g., E5, BGE) and generative retrieval (e.g., GenRet, GenSAR) methods that rely on semantic information outperform personalized search models (e.g., QEM, TEM, CoPPS) that rely solely on ID information. This also confirms that for search, semantic information is more important than collaborative information.

Table 3: The recommendation performance of different methods on the two datasets. The best and the second-best methods are highlighted in bold and underlined fonts, respectively. The improvements over the second-best methods are statistically significant (t-test, $p$-value $< 0.05$). Following commonly used settings [29, 31, 57], we pair the ground-truth item with 99 randomly sampled items that the user has not interacted with to form the candidate list.
| Datasets | Metrics | GRU4Rec | SASRec | FMLP-Rec | LRURec | P5-CID | TIGER | LC-Rec | JSR | SESRec | UnifiedSSR | UniSAR | GenSAR |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Amazon | HR@1 | 0.0440 | 0.0544 | 0.0534 | 0.0544 | 0.0881 | 0.1073 | 0.1063 | 0.0657 | 0.0627 | 0.0477 | 0.0680 | 0.1261 |
| | HR@5 | 0.1716 | 0.1887 | 0.1898 | 0.1890 | 0.1874 | 0.2046 | 0.1973 | 0.2075 | 0.2083 | 0.1667 | 0.2171 | 0.2228 |
| | HR@10 | 0.2884 | 0.2992 | 0.3041 | 0.3001 | 0.2790 | 0.2852 | 0.2760 | 0.3188 | 0.3209 | 0.2707 | 0.3319 | 0.3063 |
| | NDCG@5 | 0.1074 | 0.1216 | 0.1217 | 0.1218 | 0.1380 | 0.1565 | 0.1522 | 0.1371 | 0.1359 | 0.1071 | 0.1432 | 0.1748 |
| | NDCG@10 | 0.1449 | 0.1571 | 0.1584 | 0.1575 | 0.1674 | 0.1824 | 0.1774 | 0.1729 | 0.1721 | 0.1405 | 0.1802 | 0.2015 |
| Commercial | HR@1 | 0.1022 | 0.1519 | 0.1442 | 0.1363 | 0.2843 | 0.2630 | 0.2703 | 0.1576 | 0.1890 | 0.1515 | 0.2214 | 0.2997 |
| | HR@5 | 0.2526 | 0.2812 | 0.2711 | 0.2637 | 0.3305 | 0.3013 | 0.3001 | 0.2685 | 0.2845 | 0.2844 | 0.3228 | 0.3496 |
| | HR@10 | 0.3527 | 0.3716 | 0.3584 | 0.3525 | 0.3830 | 0.3448 | 0.3333 | 0.3529 | 0.3690 | 0.3870 | 0.4056 | 0.4031 |
| | NDCG@5 | 0.1787 | 0.2179 | 0.2093 | 0.2021 | 0.3072 | 0.2819 | 0.2849 | 0.2142 | 0.2370 | 0.2195 | 0.2727 | 0.3241 |
| | NDCG@10 | 0.2110 | 0.2470 | 0.2373 | 0.2306 | 0.3240 | 0.2958 | 0.2955 | 0.2413 | 0.2641 | 0.2524 | 0.2993 | 0.3411 |
+ +Table 4: The search performance of different methods on the two datasets. Since search relies on semantic relevance, previous works [29, 41] that randomly sample negatives often produce overly easy examples, leading to inflated performance and poor model differentiation. To address this, we follow prior personalized search methods [1, 9] and use BM25 [27] to retrieve 99 harder negatives, forming a candidate list with the positive sample for more accurate evaluation. + +
| Datasets | Metrics | QEM | TEM | CoPPS | E5 | BGE | DSI-QG | WebUltron | GenRet | JSR | UnifiedSSR | UniSAR | GenSAR |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Amazon | HR@1 | 0.1512 | 0.0839 | 0.0943 | 0.3289 | 0.4030 | 0.3558 | 0.3432 | 0.4173 | 0.0835 | 0.0799 | 0.1122 | 0.5262 |
| | HR@5 | 0.3101 | 0.3471 | 0.3380 | 0.5945 | 0.6264 | 0.5848 | 0.5464 | 0.6513 | 0.2407 | 0.2476 | 0.3129 | 0.7529 |
| | HR@10 | 0.4657 | 0.5181 | 0.4909 | 0.7203 | 0.7475 | 0.6897 | 0.6216 | 0.7339 | 0.3463 | 0.3614 | 0.4333 | 0.8217 |
| | NDCG@5 | 0.2311 | 0.2173 | 0.2154 | 0.4662 | 0.5219 | 0.4764 | 0.4507 | 0.5399 | 0.1623 | 0.1662 | 0.2143 | 0.6485 |
| | NDCG@10 | 0.2809 | 0.2722 | 0.2647 | 0.5069 | 0.5613 | 0.5103 | 0.4748 | 0.5667 | 0.1962 | 0.2028 | 0.2533 | 0.6710 |
| Commercial | HR@1 | 0.0311 | 0.0328 | 0.0265 | 0.1277 | 0.1267 | 0.1016 | 0.0804 | 0.1171 | 0.0273 | 0.0119 | 0.0511 | 0.1249 |
| | HR@5 | 0.0870 | 0.1106 | 0.0998 | 0.3108 | 0.3184 | 0.2831 | 0.2619 | 0.3320 | 0.1202 | 0.0470 | 0.1810 | 0.3655 |
| | HR@10 | 0.1539 | 0.1925 | 0.1792 | 0.4044 | 0.4194 | 0.4132 | 0.3992 | 0.4666 | 0.2137 | 0.0873 | 0.3231 | 0.5250 |
| | NDCG@5 | 0.0586 | 0.0715 | 0.0626 | 0.2230 | 0.2258 | 0.1940 | 0.1721 | 0.2273 | 0.0728 | 0.0292 | 0.1144 | 0.2472 |
| | NDCG@10 | 0.0799 | 0.0977 | 0.0880 | 0.2533 | 0.2584 | 0.2359 | 0.2164 | 0.2708 | 0.1026 | 0.0420 | 0.1597 | 0.2987 |
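For reference, the ranking metrics of Section 4.1.3 reduce to the following computation when there is a single ground-truth item per candidate list. The sample ranks are made up for illustration:

```python
import math

def hr_at_k(rank: int, k: int) -> float:
    """Hit Ratio: 1 if the ground-truth item appears in the top k."""
    return 1.0 if rank <= k else 0.0

def ndcg_at_k(rank: int, k: int) -> float:
    """NDCG with one relevant item: 1 / log2(rank + 1) if it is in the
    top k (the ideal DCG is 1, so no further normalization is needed)."""
    return 1.0 / math.log2(rank + 1) if rank <= k else 0.0

# Rank of the positive item within the 100-item candidate list (1 = top).
ranks = [1, 4, 27, 2]
print(sum(hr_at_k(r, 5) for r in ranks) / len(ranks))    # HR@5 = 0.75
print(sum(ndcg_at_k(r, 5) for r in ranks) / len(ranks))  # NDCG@5
```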
# 4.3 Ablation Study

We conducted an ablation study on the Commercial dataset to validate the effectiveness of the various training tasks in GenSAR, as shown in Table 5.

Impact of Behavior Token. As shown in Section 3.2.4, we prepended a token indicating the behavior type to the identifier of each user interaction, enabling the LLM to recognize different behavior types. To evaluate its impact, we removed this behavior token, as shown in Table 5 ("w/o Behavior Token"). The results indicate that removing the behavior token degrades performance, validating that adding this token helps the LLM better understand the relationship between user S&R behaviors.

Next Recommendation Item Prediction (NRIP). As shown in Section 3.3.1, we incorporated the training task "Next Recommendation Item Prediction" (NRIP), which enables the LLM to predict the next item to recommend based on user history. To evaluate its impact, we removed this task, as shown in Table 5 ("w/o NRIP"). The results demonstrate that removing this task significantly degrades recommendation performance and slightly reduces search performance, highlighting the importance of NRIP. Additionally, this demonstrates that recommendation training tasks can enhance search performance, verifying that recommendation can benefit search.

Next Search Query Prediction (NSQP). We included the training task "Next Search Query Prediction" (NSQP) to enable the LLM to better understand user intent by predicting the next query a user might want to search, as described in Section 3.3.2. To evaluate its impact, we observed the results after removing this task, as shown in Table 5 ("w/o NSQP"). The results indicate that removing this task significantly degrades search performance and also affects recommendation performance, demonstrating that NSQP helps the model better understand user search intent.

Next Search Item Prediction (NSIP). In Section 3.3.3, we introduced the training task "Next Search Item Prediction" (NSIP), which allows the LLM to predict the next item a user might click based on their history and input query. We analyzed the impact of this task, as shown in Table 5 ("w/o NSIP"). The results indicate that removing this task significantly degrades search performance, while also slightly affecting recommendation performance. This demonstrates the importance of NSIP for search and further highlights that search training tasks can enhance recommendation performance, validating that search can assist recommendation.

Identifier-Language Alignment. In Section 3.3.4, we introduced two tasks, Desc2ID and ID2Desc, for identifier-language alignment, which help the LLM better understand the semantic and collaborative identifiers of each item. We observed the impact of removing these two tasks, as shown in Table 5 ("w/o Desc2ID" and "w/o ID2Desc"). It can be seen that removing these tasks leads to a decrease in both S&R performance, indicating the effectiveness of these tasks in helping the LLM better understand item identifiers.

Table 5: Ablation study on the Commercial dataset, where "w/o" denotes the removal of the corresponding module in GenSAR.
| Model | Recommendation HR@5 | Recommendation NDCG@5 | Search HR@5 | Search NDCG@5 |
| --- | --- | --- | --- | --- |
| GenSAR | 0.3496 | 0.3241 | 0.3655 | 0.2472 |
| w/o Behavior Token | 0.3430 | 0.3193 | 0.3298 | 0.2224 |
| w/o NRIP | 0.0665 | 0.0392 | 0.3456 | 0.2342 |
| w/o NSQP | 0.3401 | 0.3163 | 0.3089 | 0.2053 |
| w/o NSIP | 0.3390 | 0.3152 | 0.1668 | 0.1113 |
| w/o Desc2ID | 0.3416 | 0.3188 | 0.3355 | 0.2278 |
| w/o ID2Desc | 0.3458 | 0.3220 | 0.3398 | 0.2308 |
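The collision-rate analysis in Section 4.4.2 below relies on a simple count of duplicated code sequences. A minimal sketch, with made-up identifiers:

```python
def collision_rate(item_identifiers: dict) -> float:
    """1 - (#unique identifiers / #unique items): the fraction of items
    whose learned code sequence collides with another item's."""
    unique = {tuple(codes) for codes in item_identifiers.values()}
    return 1.0 - len(unique) / len(item_identifiers)

# Toy example: items "b" and "c" were assigned the same code sequence.
ids = {"a": [7, 2, 91, 5], "b": [3, 40, 8, 8], "c": [3, 40, 8, 8]}
print(f"{collision_rate(ids):.2%}")  # 33.33%
```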
![](images/ecce5a0559e48049574371e9b23e1b75329fe824cc739e97fb0a009bfb585c53.jpg)
(a) Recommendation Performance

![](images/397a18df437bd5056f0bc169b0aeea0e25bdb1287fa1e6affaf38d1a3ef93475.jpg)
(b) Search Performance

![](images/fb95df98856241a96ab0dff04372bd6259441931f9571d2e0c69523d6587464d.jpg)
Figure 4: Performance of GenSAR using different identifiers.
Figure 5: Collision rate of different identifiers.

# 4.4 Experimental Analysis

We conducted further experiments on the Commercial dataset to analyze the effectiveness of different modules in GenSAR.

4.4.1 Impact of Different Identifiers. To balance the semantic information needed for search and the collaborative information needed for recommendation, we designed the joint S&R identifier in Section 3.2. To validate its effectiveness, we compared it with identifiers learned directly from semantic embeddings or collaborative embeddings using RQ-VAE [26, 56], as shown in Figure 4. "Only Collaborative" represents using only collaborative embeddings, while "Only Semantic" represents using only semantic embeddings. The results show that identifiers derived solely from semantic or collaborative information lead to degraded performance. Furthermore, using only collaborative information results in worse search performance, which aligns with the fact that search relies more on semantic information.

![](images/7eaf0981cb5f20384989aa56e36ad232812d82cdb50bb0e90f7e15e1c57b40d9.jpg)
(a) Recommendation Performance

![](images/6a62487787c0bd42a8e02f0875370a16e73635f9a6a3a3a06be3b844c3d351b4.jpg)
(b) Search Performance
Figure 6: Performance under different numbers of shared codebooks $L_{m}$. We fix $L_{m} + L_{n} = 4$ and vary $L_{m}$ to observe the results.

4.4.2 Collision Rate of Different Identifiers. Additionally, we analyzed the advantages of different identifiers from the perspective of collision rate. The collision rate is calculated as follows:

$$
\text{Collision Rate} = 1 - \frac{\#\,\text{Unique Identifiers}}{\#\,\text{Unique Items}},
$$

where $\#\,\text{Unique Identifiers}$ represents the number of unique identifiers, and $\#\,\text{Unique Items}$ represents the number of unique items. Since RQ-VAE does not guarantee a unique identifier for each item during the learning process, collisions may occur where different items share the same identifier [26, 56]. A higher collision rate can negatively impact the model's performance. From Figure 5, it can be observed that the two identifiers assigned to each item in GenSAR, incorporating both semantic and collaborative information, have lower collision rates of $0.18\%$ and $0.39\%$, respectively. In contrast, identifiers derived solely from semantic embeddings or collaborative embeddings exhibit higher collision rates of $1.37\%$ and $0.90\%$, respectively. This further validates the advantage of the identifiers in GenSAR, as their lower collision rate enables the model to achieve better performance.

4.4.3 Impact of Hyper-parameters. As described in Section 3.2, we have $L_{m}$-level shared codebooks and $L_{n}$-level specific codebooks.
Here, we analyze the impact of the number of shared and specific codebooks ( $L_{m}$ and $L_{n}$ ) on the results, as shown in Figure 6. We fix $L_{m} + L_{n} = 4$ and observe the results. It can be seen that having too few ( $L_{m} = 1$ ) or too many ( $L_{m} = 3$ ) shared codebooks fails to achieve strong performance in both S&R. This indicates that $L_{m}$ needs to be properly set so that the identifier can capture both the shared information between semantics and collaboration as well as their specific characteristics. Only in this way can we achieve better performance in both S&R. + +Additionally, we analyzed the impact of identifier length on performance, as shown in Figure 7. We fix $L_{m} = 2$ and vary $L_{n}$ to adjust the identifier length and observe the results. It can be seen that both shorter $(L_{m} + L_{n} = 3)$ and longer $(L_{m} + L_{n} = 5)$ identifiers lead to performance degradation. This is because, when the identifier is too short, the identifiers learned through RQ-VAE are more prone to collisions, resulting in a higher collision rate and making it difficult for the model to distinguish between different items. On the other hand, when the identifier is too long, the model requires more decoding steps during item generation, leading to accumulated errors and ultimately deteriorating performance. Therefore, it is essential to properly set the identifier length to achieve better performance. + +![](images/15322b8796533cd47d49b9fc9b63e562b17b827521e8c4717054345e22078795.jpg) +(a) Recommendation Performance + +![](images/6c8e992b55df077b506a54aa95974fa63b68fb5fc6708d66c678ddb49d9a9381.jpg) +(b) Search Performance +Figure 7: Performance under different length of the identifier. We fix $L_{m} = 2$ and vary $L_{n}$ to adjust the identifier length. + +# 5 Conclusion + +In this paper, we propose GenSAR, which unifies balanced search and recommendation through generative retrieval to alleviate the trade-off between the two tasks and improve their performance. To balance the semantic information required for search and the collaborative information needed for recommendation, we design the joint S&R identifier and different training tasks. First, we learn two identifiers for each item to represent semantic and collaborative information, respectively. These identifiers share a common part to capture the information shared between semantics and collaboration while retaining distinct parts to preserve specific information. Second, we design different training tasks to help the model better understand the requirements of S&R tasks. We also validate the effectiveness of GenSAR through extensive experiments. + +# References + +[1] Wasi Uddin Ahmad, Kai-Wei Chang, and Hongning Wang. 2018. Multi-task learning for document ranking and query suggestion. In International conference on learning representations. +[2] Qingyao Ai, Daniel N Hill, SVN Vishwanathan, and W Bruce Croft. 2019. A zero attention model for personalized product search. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management. 379-388. +[3] Qingyao Ai, Yongfeng Zhang, Keping Bi, Xu Chen, and W Bruce Croft. 2017. Learning a hierarchical embedding model for personalized product search. In Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval. 645-654. +[4] Jinheon Baek, Nirupama Chandrasekaran, Silviu Cucerzan, Allen Herring, and Sujay Kumar Jauhar. 2024. 
Knowledge-augmented large language models for personalized contextual query suggestion. In Proceedings of the ACM on Web Conference 2024. 3355-3366. +[5] Michele Bevilacqua, Giuseppe Ottaviano, Patrick Lewis, Scott Yih, Sebastian Riedel, and Fabio Petroni. 2022. Autoregressive search engines: Generating substrings as document identifiers. Advances in Neural Information Processing Systems 35 (2022), 31668-31683. +[6] Keping Bi, Qingyao Ai, and W Bruce Croft. 2020. A transformer-based embedding model for personalized product search. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 1521-1524. +[7] Shitong Dai, Jiongnan Liu, Zhicheng Dou, Haonan Wang, Lin Liu, Bo Long, and Ji-Rong Wen. 2023. Contrastive Learning for User Sequence Representation in Personalized Product Search. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, KDD 2023, Long Beach, CA, USA, August 6-10, 2023. ACM, 380-389. +[8] Sunhao Dai, Ninglu Shao, Haiyuan Zhao, Weijie Yu, Zihua Si, Chen Xu, Zhongxiang Sun, Xiao Zhang, and Jun Xu. 2023. Uncovering chatgpt's capabilities in recommender systems. In Proceedings of the 17th ACM Conference on Recommender Systems. 1126-1132. +[9] Chenlong Deng, Yujia Zhou, and Zhicheng Dou. 2022. Improving personalized search with dual-feedback network. In Proceedings of the fifteenth ACM international conference on web search and data mining, 210-218. +[10] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). + +[11] Shijie Geng, Shuchang Liu, Zuohui Fu, Yingqiang Ge, and Yongfeng Zhang. 2022. Recommendation as language processing (rlp): A unified pretrain, personalized prompt & predict paradigm (p5). In Proceedings of the 16th ACM Conference on Recommender Systems. 299-315. +[12] Yulong Gu, Wentian Bao, Dan Ou, Xiang Li, Baoliang Cui, Biyu Ma, Haikuan Huang, Qingwen Liu, and Xiaoyi Zeng. 2021. Self-supervised learning on users' spontaneous behaviors for multi-scenario ranking in e-commerce. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 3828-3837. +[13] Ruining He and Julian McAuley. 2016. Ups and downs: Modeling the visual evolution of fashion trends with one-class collaborative filtering. In proceedings of the 25th international conference on world wide web. 507-517. +[14] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173-182. +[15] Zhankui He, Handong Zhao, Zhaowen Wang, Zhe Lin, Ajinkya Kale, and Julian Mcauley. 2022. Query-Aware Sequential Recommendation. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management (Atlanta, GA, USA) (CIKM '22). Association for Computing Machinery, New York, NY, USA, 4019-4023. +[16] Balázs Hidasi, Alexandros Karatzoglou, Linas Baltrunas, and Domonkos Tikk. 2016. Session-based Recommendations with Recurrent Neural Networks. In 4th International Conference on Learning Representations, ICLR 2016, San Juan, Puerto Rico, May 2-4, 2016, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.). 
+[17] Wenyue Hua, Shuyuan Xu, Yingqiang Ge, and Yongfeng Zhang. 2023. How to index item ids for recommendation foundation models. In Proceedings of the Annual International ACM SIGIR Conference on Research and Development in Information Retrieval in the Asia Pacific Region. 195-204. +[18] Gautier Izacard, Mathilde Caron, Lucas Hosseini, Sebastian Riedel, Piotr Bojanowski, Armand Joulin, and Edouard Grave. 2021. Unsupervised dense information retrieval with contrastive learning. arXiv preprint arXiv:2112.09118 (2021). +[19] Wang-Cheng Kang and Julian McAuley. 2018. Self-attentive sequential recommendation. In 2018 IEEE International Conference on Data Mining (ICDM). IEEE, 197-206. +[20] Doyup Lee, Chihuon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. 2022. Autoregressive image generation using residual quantization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 11523-11532. +[21] Xiaoxi Li, Jiajie Jin, Yujiia Zhou, Yuyao Zhang, Peitian Zhang, Yutao Zhu, and Zhicheng Dou. 2024. From matching to generation: A survey on generative information retrieval. arXiv preprint arXiv:2404.14851 (2024). +[22] Yongqi Li, Nan Yang, Liang Wang, Furu Wei, and Wenjie Li. 2023. Multiview Identifiers Enhanced Generative Retrieval. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 6636-6648. +[23] Jiayi Liao, Sihang Li, Zhengyi Yang, Jiancan Wu, Yancheng Yuan, Xiang Wang, and Xiangnan He. 2024. Llara: Large language-recommendation assistant. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1785-1795. +[24] Julian McAuley, Christopher Targett, Qinfeng Shi, and Anton Van Den Hengel. 2015. Image-based recommendations on styles and substitutes. In Proceedings of the 38th international ACM SIGIR conference on research and development in information retrieval. 43-52. +[25] Gustavo Penha, Ali Vardasbi, Enrico Palumbo, Marco De Nadai, and Hugues Bouchard. 2024. Bridging Search and Recommendation in Generative Retrieval: Does One Task Help the Other?. In Proceedings of the 18th ACM Conference on Recommender Systems. 340-349. +[26] Shashank Rajput, Nikhil Mehta, Anima Singh, Raghunandan Hulikal Keshavan, Trung Vu, Lukasz Heldt, Lichan Hong, Yi Tay, Vinh Tran, Jonah Samost, et al. 2023. Recommender systems with generative retrieval. Advances in Neural Information Processing Systems 36 (2023), 10299-10315. +[27] Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: BM25 and beyond. Foundations and Trends® in Information Retrieval 3, 4 (2009), 333-389. +[28] Chenglei Shen, Xiao Zhang, Teng Shi, Changshuo Zhang, Guofu Xie, and Jun Xu. 2024. A survey of controllable learning: Methods and applications in information retrieval. arXiv preprint arXiv:2407.06083 (2024). +[29] Teng Shi, Zihua Si, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Dewei Leng, Yanan Niu, and Yang Song. 2024. UniSAR: Modeling User Transition Behaviors between Search and Recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1029-1039. +[30] Zihua Si, Xueran Han, Xiao Zhang, Jun Xu, Yue Yin, Yang Song, and Ji-Rong Wen. 2022. A Model-Agnostic Causal Learning Framework for Recommendation Using Search Data. In Proceedings of the ACM Web Conference 2022 (Virtual Event, Lyon, France) (WWW '22). Association for Computing Machinery, New York, NY, USA, 224-233. 
+ +[31] Zihua Si, Zhongxiang Sun, Xiao Zhang, Jun Xu, Xiaoxue Zang, Yang Song, Kun Gai, and Ji-Rong Wen. 2023. When search meets recommendation: Learning disentangled search representation for recommendation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1313-1323. +[32] Fei Sun, Jun Liu, Jian Wu, Changhua Pei, Xiao Lin, Wenwu Ou, and Peng Jiang. 2019. BERT4Rec: Sequential Recommendation with Bidirectional Encoder Representations from Transformer. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management (Beijing, China) (CIKM '19). ACM, New York, NY, USA, 1441-1450. +[33] Weiwei Sun, Lingyong Yan, Zheng Chen, Shuaiqiang Wang, Haichao Zhu, Pengjie Ren, Zhumin Chen, Dawei Yin, Maarten Rijke, and Zhaochun Ren. 2024. Learning to tokenize for generative retrieval. Advances in Neural Information Processing Systems 36 (2024). +[34] Jiakai Tang, Sunhao Dai, Teng Shi, Jun Xu, Xu Chen, Wen Chen, Wu Jian, and Yuning Jiang. 2025. Think Before Recommend: Unleashing the Latent Reasoning Power for Sequential Recommendation. arXiv:2503.22675 [cs.IR] https://arxiv.org/abs/2503.22675 +[35] Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. 2022. Transformer memory as a differentiable search index. Advances in Neural Information Processing Systems 35 (2022), 21831-21843. +[36] Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, and Furu Wei. 2024. Multilingual e5 text embeddings: A technical report. arXiv preprint arXiv:2402.05672 (2024). +[37] Yuening Wang, Man Chen, Yaochen Hu, Wei Guo, Yingxue Zhang, Hufeng Guo, Yong Liu, and Mark Coates. 2024. Enhancing Click-through Rate Prediction in Recommendation Domain with Search Query Representation. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 2462-2471. +[38] Yujing Wang, Yingyan Hou, Haonan Wang, Ziming Miao, Shibin Wu, Qi Chen, Yuqing Xia, Chengmin Chi, Guoshuai Zhao, Zheng Liu, et al. 2022. A neural corpus indexer for document retrieval. Advances in Neural Information Processing Systems 35 (2022), 25600-25614. +[39] Yu Wang, Zhengyang Wang, Hengrui Zhang, Qingyu Yin, Xianfeng Tang, Yinghan Wang, Danqing Zhang, Limeng Cui, Monica Cheng, Bing Yin, et al. 2023. Exploiting intent evolution in e-commercial query recommendation. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 5162-5173. +[40] Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2024. C-pack: Packed resources for general chinese embeddings. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 641-649. +[41] Jiayi Xie, Shang Liu, Gao Cong, and Zhenzhong Chen. 2024. UnifiedSSR: A Unified Framework of Sequential Search and Recommendation. In Proceedings of the ACM on Web Conference 2024. 3410-3419. +[42] Lee Xiong, Chenyan Xiong, Ye Li, Kwok-Fung Tang, Jialin Liu, Paul Bennett, Junaid Ahmed, and Arnold Overwijk. 2020. Approximate nearest neighbor negative contrastive learning for dense text retrieval. arXiv preprint arXiv:2007.00808 (2020). +[43] Jing Yao, Zhicheng Dou, Ruobing Xie, Yanxiong Lu, Zhiping Wang, and Ji-Rong Wen. 2021. USER: A Unified Information Search and Recommendation Model Based on Integrated Behavior Sequence. 
In Proceedings of the 30th ACM International Conference on Information & Knowledge Management (Virtual Event, Queensland, Australia) (CIKM '21). Association for Computing Machinery, New York, NY, USA, 2373-2382. +[44] Zheng Yuan, Fajie Yuan, Yu Song, Youhua Li, Junchen Fu, Fei Yang, Yunzhu Pan, and Yongxin Ni. 2023. Where to go next for recommender systems? ID- vs. modality-based recommender models revisited. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2639-2649. +[45] Zhenrui Yue, Yueqi Wang, Zhankui He, Huimin Zeng, Julian McAuley, and Dong Wang. 2024. Linear recurrent units for sequential recommendation. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining. 930-938. +[46] Hamed Zamani and W. Bruce Croft. 2018. Joint Modeling and Optimization of Search and Recommendation. In Proceedings of the First Biennial Conference on Design of Experimental Search & Information Retrieval Systems, Bertinoro, Italy, August 28-31, 2018 (CEUR Workshop Proceedings, Vol. 2167). CEUR-WS.org, 36-41. +[47] Hamed Zamani and W. Bruce Croft. 2020. Learning a Joint Search and Recommendation Model from User-Item Interactions. In Proceedings of the 13th International Conference on Web Search and Data Mining (Houston, TX, USA) (WSDM '20). Association for Computing Machinery, New York, NY, USA, 717-725. +[48] Changshuo Zhang, Teng Shi, Xiao Zhang, Qi Liu, Ruobing Xie, Jun Xu, and Ji-Rong Wen. 2024. Modeling Domain and Feedback Transitions for Cross-Domain Sequential Recommendation. arXiv preprint arXiv:2408.08209 (2024). +[49] Changshuo Zhang, Teng Shi, Xiao Zhang, Yanping Zheng, Ruobing Xie, Qi Liu, Jun Xu, and Ji-Rong Wen. 2024. QAGCF: Graph Collaborative Filtering for Q&A Recommendation. arXiv preprint arXiv:2406.04828 (2024). +[50] Changshuo Zhang, Xiao Zhang, Teng Shi, Jun Xu, and Ji-Rong Wen. 2025. Test-Time Alignment for Tracking User Interest Shifts in Sequential Recommendation. arXiv:2504.01489 [cs.IR] https://arxiv.org/abs/2504.01489 +[51] Kepu Zhang, Teng Shi, Sunhao Dai, Xiao Zhang, Yinfeng Li, Jing Lu, Xiaoxue Zang, Yang Song, and Jun Xu. 2024. SAQRec: Aligning Recommender Systems to User Satisfaction via Questionnaire Feedback. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3165-3175. +[52] Xiao Zhang, Teng Shi, Jun Xu, Zhenhua Dong, and Ji-Rong Wen. 2024. Model-Agnostic Causal Embedding Learning for Counterfactually Group-Fair Recommendation. IEEE Transactions on Knowledge and Data Engineering (2024). +[53] Yuting Zhang, Yiqing Wu, Ruidong Han, Ying Sun, Yongchun Zhu, Xiang Li, Wei Lin, Fuzhen Zhuang, Zhulin An, and Yongjun Xu. 2024. Unified Dual-Intent Translation for Joint Modeling of Search and Recommendation. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6291-6300. +[54] Kai Zhao, Yukun Zheng, Tao Zhuang, Xiang Li, and Xiaoyi Zeng. 2022. Joint Learning of E-Commerce Search and Recommendation with a Unified Graph Neural Network. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining (Virtual Event, AZ, USA) (WSDM '22). Association for Computing Machinery, New York, NY, USA, 1461-1469. +[55] Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023).
+[56] Bowen Zheng, Yupeng Hou, Hongyu Lu, Yu Chen, Wayne Xin Zhao, Ming Chen, and Ji-Rong Wen. 2024. Adapting large language models by integrating collaborative semantics for recommendation. In 2024 IEEE 40th International Conference on Data Engineering (ICDE). IEEE, 1435-1448. +[57] Kun Zhou, Hui Yu, Wayne Xin Zhao, and Ji-Rong Wen. 2022. Filter-Enhanced MLP is All You Need for Sequential Recommendation. In Proceedings of the ACM Web Conference 2022 (Virtual Event, Lyon, France) (WWW'22). Association for Computing Machinery, New York, NY, USA, 2388-2399. +[58] Yujia Zhou, Jing Yao, Ledell Wu, Zhicheng Dou, and Ji-Rong Wen. 2023. WebUltron: An Ultimate Retriever on Webpages Under the Model-Centric Paradigm. IEEE Transactions on Knowledge and Data Engineering (2023). +[59] Shengyao Zhuang, Houxing Ren, Linjun Shou, Jian Pei, Ming Gong, Guido Zuccon, and Daxin Jiang. 2022. Bridging the gap between indexing and retrieval for differentiable search index with query generation. arXiv preprint arXiv:2206.10128 (2022). \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05730/images/0eb6ab1ddbe1b20f36c0374a24bd3c90cda43ce603162394fc84a167ceba717f.jpg b/data/2025/2504_05xxx/2504.05730/images/0eb6ab1ddbe1b20f36c0374a24bd3c90cda43ce603162394fc84a167ceba717f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..635b40db161973743c9cc92031917af7736dbb72 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/0eb6ab1ddbe1b20f36c0374a24bd3c90cda43ce603162394fc84a167ceba717f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0da224309bb738ce843fa8eb07f4a8b68b9e4c2890156d173a0b3e0cd1e1885 +size 8891 diff --git a/data/2025/2504_05xxx/2504.05730/images/15322b8796533cd47d49b9fc9b63e562b17b827521e8c4717054345e22078795.jpg b/data/2025/2504_05xxx/2504.05730/images/15322b8796533cd47d49b9fc9b63e562b17b827521e8c4717054345e22078795.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3da1e9ee0b20266baff5bc9a000f606fdb0b4d35 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/15322b8796533cd47d49b9fc9b63e562b17b827521e8c4717054345e22078795.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce4f56d18430c7b11a350f042769e172def999bc979879b9e75ed0e65a784c8f +size 11071 diff --git a/data/2025/2504_05xxx/2504.05730/images/1c7094567f67d298be0fb6a01b3baf82c4ade3cf31f329fbe9471a91e92cf859.jpg b/data/2025/2504_05xxx/2504.05730/images/1c7094567f67d298be0fb6a01b3baf82c4ade3cf31f329fbe9471a91e92cf859.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fff8b291800e275a62fa496a53d950d1c5443d4c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/1c7094567f67d298be0fb6a01b3baf82c4ade3cf31f329fbe9471a91e92cf859.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4fc01645b2d30aa17ff3b61443d11588ee18dde3e698b693a9823f2198ec658 +size 4443 diff --git a/data/2025/2504_05xxx/2504.05730/images/228e08291e1d7b20ac0fd506ea17d1f7057573c3ae0e9a105154addb1aa62c9d.jpg b/data/2025/2504_05xxx/2504.05730/images/228e08291e1d7b20ac0fd506ea17d1f7057573c3ae0e9a105154addb1aa62c9d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..233bbee40fdb43c2b56a26c84e000f9b2bd5a296 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/228e08291e1d7b20ac0fd506ea17d1f7057573c3ae0e9a105154addb1aa62c9d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb24bebbe3ba19f2091bd934c6918ab79c01336caf88fcc1596ab53ca95f70be +size 3970 diff --git 
a/data/2025/2504_05xxx/2504.05730/images/2492eabca5dc294d37215f26e8b18bb0004c6adcb54dc3e00c8d92abb5dbddc4.jpg b/data/2025/2504_05xxx/2504.05730/images/2492eabca5dc294d37215f26e8b18bb0004c6adcb54dc3e00c8d92abb5dbddc4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc531f710821811f62e3f024355a7eadd2e57150 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/2492eabca5dc294d37215f26e8b18bb0004c6adcb54dc3e00c8d92abb5dbddc4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d122bf61d99913ff3625f0c9ba3ea094d9d4ec886f946f80fda896dc425cb44 +size 18353 diff --git a/data/2025/2504_05xxx/2504.05730/images/27a565a2cae8a58717a525c0c21642d79e223adc5714f2e704bc946d2a8d0dfd.jpg b/data/2025/2504_05xxx/2504.05730/images/27a565a2cae8a58717a525c0c21642d79e223adc5714f2e704bc946d2a8d0dfd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3331e5528fb9296ca17f7bc616b4c6961c259788 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/27a565a2cae8a58717a525c0c21642d79e223adc5714f2e704bc946d2a8d0dfd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f6e04de8fedb487131a3893060d6f0157bac22e739f43660f367e7042f0928e +size 4947 diff --git a/data/2025/2504_05xxx/2504.05730/images/37fd8faf0dbc9a27d1f649d989e50bad7c61a13634be58e67730c12d90be365f.jpg b/data/2025/2504_05xxx/2504.05730/images/37fd8faf0dbc9a27d1f649d989e50bad7c61a13634be58e67730c12d90be365f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b81718af4c1b641e03f2714486ea557d94d57c7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/37fd8faf0dbc9a27d1f649d989e50bad7c61a13634be58e67730c12d90be365f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4f954a7b5d9fdd3c2274e7087d304161c0a6b505a92fd3f280765339ea03e5b +size 4823 diff --git a/data/2025/2504_05xxx/2504.05730/images/397a18df437bd5056f0bc169b0aeea0e25bdb1287fa1e6affaf38d1a3ef93475.jpg b/data/2025/2504_05xxx/2504.05730/images/397a18df437bd5056f0bc169b0aeea0e25bdb1287fa1e6affaf38d1a3ef93475.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39a5175ecc75808fc4986427fa8ee2b31b15a9d3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/397a18df437bd5056f0bc169b0aeea0e25bdb1287fa1e6affaf38d1a3ef93475.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d71df3dcab633b6934dbfc841d7d00e9c3a368841a6b37df45810922174546bb +size 11973 diff --git a/data/2025/2504_05xxx/2504.05730/images/497db33f26d209bee1ab9f00731f63686f13f15e7147069d77be62270990c6f2.jpg b/data/2025/2504_05xxx/2504.05730/images/497db33f26d209bee1ab9f00731f63686f13f15e7147069d77be62270990c6f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6430b0b6b027265b56d14796cc65634014e83dc7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/497db33f26d209bee1ab9f00731f63686f13f15e7147069d77be62270990c6f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b41019f9794e16565fd6de3780bb0b6ffd14167a1b84c1e0d2bba1599d1fedf0 +size 3330 diff --git a/data/2025/2504_05xxx/2504.05730/images/4a8c7471cd3f46d74306080e8030939a0f5e5c7b10ca3e1dbbcdd8fc1c650e19.jpg b/data/2025/2504_05xxx/2504.05730/images/4a8c7471cd3f46d74306080e8030939a0f5e5c7b10ca3e1dbbcdd8fc1c650e19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..147f0a3ab3cbf4913c36f988d375ecb562c53d9a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/4a8c7471cd3f46d74306080e8030939a0f5e5c7b10ca3e1dbbcdd8fc1c650e19.jpg @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a7da50162eda8161bbba2239c77547d28179a473e29bc98f88d391d87d162ab +size 5996 diff --git a/data/2025/2504_05xxx/2504.05730/images/4c0d5a8c826b9a848953e41dba99e0e6c1b976a8c50d336b47c5cec0bf95c750.jpg b/data/2025/2504_05xxx/2504.05730/images/4c0d5a8c826b9a848953e41dba99e0e6c1b976a8c50d336b47c5cec0bf95c750.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc39fe0288676186eb97c4f5625577c8a151fcc0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/4c0d5a8c826b9a848953e41dba99e0e6c1b976a8c50d336b47c5cec0bf95c750.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2198400f7b1eb160dee0410947a573bfe4dff32b8f268652a1db3fdc50ea438 +size 3654 diff --git a/data/2025/2504_05xxx/2504.05730/images/51bd9a74e03503c42b16c873c60574e952fffb76b5dda5e0c1097b003e3b016e.jpg b/data/2025/2504_05xxx/2504.05730/images/51bd9a74e03503c42b16c873c60574e952fffb76b5dda5e0c1097b003e3b016e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f314545af3d60dcd18e9db618cc32778e2caa7e7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/51bd9a74e03503c42b16c873c60574e952fffb76b5dda5e0c1097b003e3b016e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c73a2c60c9d596ebc08c1c1f46d05e6c81d2a6cb4c76983cacd1b49e508055c +size 7235 diff --git a/data/2025/2504_05xxx/2504.05730/images/628b138c6ab45d0641959ce993765f284c74574409b6b4abd42ddca7826121cf.jpg b/data/2025/2504_05xxx/2504.05730/images/628b138c6ab45d0641959ce993765f284c74574409b6b4abd42ddca7826121cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74a899237207b42f2bc7bb5a7bdc370c422350a3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/628b138c6ab45d0641959ce993765f284c74574409b6b4abd42ddca7826121cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fe1d1dc668435e424320614400da0f032d17f28e65e29eeb65c47a767fe8b5c +size 10218 diff --git a/data/2025/2504_05xxx/2504.05730/images/63f2ec8f4ddeb16ebbdee2423338aaa595f1ec4d49f6298f818e2b331b3d817e.jpg b/data/2025/2504_05xxx/2504.05730/images/63f2ec8f4ddeb16ebbdee2423338aaa595f1ec4d49f6298f818e2b331b3d817e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30bca8a176994aaf27ce2df6d7ddfad7083266da --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/63f2ec8f4ddeb16ebbdee2423338aaa595f1ec4d49f6298f818e2b331b3d817e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f4a6608f81d44dd9b1d9555f33c8f8e6fd9d7a42a5476dafd600d678eacce38 +size 4784 diff --git a/data/2025/2504_05xxx/2504.05730/images/6a62487787c0bd42a8e02f0875370a16e73635f9a6a3a3a06be3b844c3d351b4.jpg b/data/2025/2504_05xxx/2504.05730/images/6a62487787c0bd42a8e02f0875370a16e73635f9a6a3a3a06be3b844c3d351b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca2cca8f365ba4643fc8c054b5c72bbdefb87193 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/6a62487787c0bd42a8e02f0875370a16e73635f9a6a3a3a06be3b844c3d351b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a4b687690357f0f464a82a0981d9a61fdf2540920f213959d90e66fb3574962 +size 11302 diff --git a/data/2025/2504_05xxx/2504.05730/images/6c8e992b55df077b506a54aa95974fa63b68fb5fc6708d66c678ddb49d9a9381.jpg b/data/2025/2504_05xxx/2504.05730/images/6c8e992b55df077b506a54aa95974fa63b68fb5fc6708d66c678ddb49d9a9381.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..24b9a971d74c014f89dd74976e50ebdaf3c8a515 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/6c8e992b55df077b506a54aa95974fa63b68fb5fc6708d66c678ddb49d9a9381.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8fc90c7fcbd261563e6c186bd73dcffa212f2ec3a683a7332384b90e0d89386 +size 11407 diff --git a/data/2025/2504_05xxx/2504.05730/images/6f6f1922647d9249aee68c7d995b2b82222e7b5d1b014f595f282014536b00fc.jpg b/data/2025/2504_05xxx/2504.05730/images/6f6f1922647d9249aee68c7d995b2b82222e7b5d1b014f595f282014536b00fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eaf8bfc75de989aebf3409234b9ae71e238dabf6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/6f6f1922647d9249aee68c7d995b2b82222e7b5d1b014f595f282014536b00fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2aca7801c5c2cc1a946d6f6a0e951319299535a280525e9ba2b129d0e62e2185 +size 21275 diff --git a/data/2025/2504_05xxx/2504.05730/images/757f11261ee325c692b5f00503a37aa68878e51f8dce743a82fe3fffcbf18636.jpg b/data/2025/2504_05xxx/2504.05730/images/757f11261ee325c692b5f00503a37aa68878e51f8dce743a82fe3fffcbf18636.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30ac8b8792c5b3ba28d94fd62c7895330c87cc5f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/757f11261ee325c692b5f00503a37aa68878e51f8dce743a82fe3fffcbf18636.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c892936020538dfe7b0e6e02db5fd099dbc41bc2eb46e1e81f06b514d10cf9c +size 79885 diff --git a/data/2025/2504_05xxx/2504.05730/images/75c269323e7a69b60fb9e7112ba8de4ac044bc997681bbbf5853b88f1e5eda9c.jpg b/data/2025/2504_05xxx/2504.05730/images/75c269323e7a69b60fb9e7112ba8de4ac044bc997681bbbf5853b88f1e5eda9c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b413f5ac1acfd2d070f8220864b0c300d6bc011a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/75c269323e7a69b60fb9e7112ba8de4ac044bc997681bbbf5853b88f1e5eda9c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c05d5852721710301fc1519d0bb033f5eaf341d5051317f4f4c3eb35a29e9d1 +size 32693 diff --git a/data/2025/2504_05xxx/2504.05730/images/76915c600d59c0426bebb18d152b3f3340735ab970484cea05e5b3b7c1a13978.jpg b/data/2025/2504_05xxx/2504.05730/images/76915c600d59c0426bebb18d152b3f3340735ab970484cea05e5b3b7c1a13978.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ebbc9d05d990adb567244cd4d17e3802a843558 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/76915c600d59c0426bebb18d152b3f3340735ab970484cea05e5b3b7c1a13978.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:047985dffc1b1278cc235ad504438f14409afd6c91387da445792c9f0efd27d3 +size 105224 diff --git a/data/2025/2504_05xxx/2504.05730/images/79113b0abda90b910446c480a05b8133424858f455357ba8d282c5426bd427bb.jpg b/data/2025/2504_05xxx/2504.05730/images/79113b0abda90b910446c480a05b8133424858f455357ba8d282c5426bd427bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1acfde7176bae549fa9410ba3c98dcd467aa7203 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/79113b0abda90b910446c480a05b8133424858f455357ba8d282c5426bd427bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b9ee23aa3f2d5aa3dbde190b937ac6ea8bbceb0fb077e7f3012efeb70231c3e +size 3409 diff --git 
a/data/2025/2504_05xxx/2504.05730/images/7bb6ece7f2d7b36cebf8d7e58ec55a09fb93edc237d58a95e2e8cf47c9a46c11.jpg b/data/2025/2504_05xxx/2504.05730/images/7bb6ece7f2d7b36cebf8d7e58ec55a09fb93edc237d58a95e2e8cf47c9a46c11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..72b6c48c04e648af77beba0c73ba7f4d1c0f337c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/7bb6ece7f2d7b36cebf8d7e58ec55a09fb93edc237d58a95e2e8cf47c9a46c11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c7ec9638b8cda5278aa1d9a70fee7476e7229fbffb3db684fec5fc4c81df2c1 +size 4331 diff --git a/data/2025/2504_05xxx/2504.05730/images/7eaf0981cb5f20384989aa56e36ad232812d82cdb50bb0e90f7e15e1c57b40d9.jpg b/data/2025/2504_05xxx/2504.05730/images/7eaf0981cb5f20384989aa56e36ad232812d82cdb50bb0e90f7e15e1c57b40d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7870471feb5e4c38c4c076d794c77862db1f10d6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/7eaf0981cb5f20384989aa56e36ad232812d82cdb50bb0e90f7e15e1c57b40d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84cf393a07ed21ecd17f5e082d5811d33c444c69e33f02134a9efc88372f2303 +size 10855 diff --git a/data/2025/2504_05xxx/2504.05730/images/829817898d5e6fcd9cc0d3598c229b4c1dda91385eafc9f3cb585061267148b0.jpg b/data/2025/2504_05xxx/2504.05730/images/829817898d5e6fcd9cc0d3598c229b4c1dda91385eafc9f3cb585061267148b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf9a8d7625e836601d6250f916614e4c5edfa4e7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/829817898d5e6fcd9cc0d3598c229b4c1dda91385eafc9f3cb585061267148b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9effefb7b414af59851b40caa93b22d89a9e9885b4f9011a18f105d0eb0b7bd7 +size 5156 diff --git a/data/2025/2504_05xxx/2504.05730/images/852f49bc31d0e7f4e1eea1e2c9a018de3829e9f64126eff2bd75879bf40fcc2b.jpg b/data/2025/2504_05xxx/2504.05730/images/852f49bc31d0e7f4e1eea1e2c9a018de3829e9f64126eff2bd75879bf40fcc2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4563f3f5e060466607b25728065fbe5c75441f90 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/852f49bc31d0e7f4e1eea1e2c9a018de3829e9f64126eff2bd75879bf40fcc2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1c0e78f0bdd767f0d99e085ab71f48a47a9832ecac12a1ff400673778f3c0e6 +size 41809 diff --git a/data/2025/2504_05xxx/2504.05730/images/8b613695057799ce96cafacfb9556faf83ad4ca6cc814befda690d610957dbea.jpg b/data/2025/2504_05xxx/2504.05730/images/8b613695057799ce96cafacfb9556faf83ad4ca6cc814befda690d610957dbea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c916cf2a4d2cd700c6b4c8f5d1167032ce47e29 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/8b613695057799ce96cafacfb9556faf83ad4ca6cc814befda690d610957dbea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5ee76707e1b8b112d3770931ac7c0b38225733cf37af25648c5470141aede1a +size 9099 diff --git a/data/2025/2504_05xxx/2504.05730/images/8f4fe20c7e4cf1f7fd577a858b8252ea3e484d5051f0b5eefda1ee11d2c4078f.jpg b/data/2025/2504_05xxx/2504.05730/images/8f4fe20c7e4cf1f7fd577a858b8252ea3e484d5051f0b5eefda1ee11d2c4078f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e33f9a158a185b4f1fab91a0a22f7bc1a8229e94 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/8f4fe20c7e4cf1f7fd577a858b8252ea3e484d5051f0b5eefda1ee11d2c4078f.jpg @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14e8c42fc6cd552658fecb23406efe7799fe69e03ad76acfc4e11a266522f0bf +size 7086 diff --git a/data/2025/2504_05xxx/2504.05730/images/95093f04c6644bbe7aecaf5f919e51994d54ed28757e7a3491da4d26fd377e17.jpg b/data/2025/2504_05xxx/2504.05730/images/95093f04c6644bbe7aecaf5f919e51994d54ed28757e7a3491da4d26fd377e17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5980ac97758193edf4f609f5ffffb5fdedcc8bb4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/95093f04c6644bbe7aecaf5f919e51994d54ed28757e7a3491da4d26fd377e17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b95968b97660caa6d29b80d212f3280cb46613cdacd193259b2f939268f38789 +size 6627 diff --git a/data/2025/2504_05xxx/2504.05730/images/9edd3d9b02c30b3b1ca9e7ca1a76fa2f37f746d23bd32759acf9aefd1337c887.jpg b/data/2025/2504_05xxx/2504.05730/images/9edd3d9b02c30b3b1ca9e7ca1a76fa2f37f746d23bd32759acf9aefd1337c887.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4b4d5c71e288d370f9a733035df7dddda3d49e2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/9edd3d9b02c30b3b1ca9e7ca1a76fa2f37f746d23bd32759acf9aefd1337c887.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70bb0feab24cdde57dd8b500302cf58e96f43063e81e93c47363d01410b9de0c +size 17819 diff --git a/data/2025/2504_05xxx/2504.05730/images/9ef159d9206396424cc39b0b32aac40dd09c48c5a6fb6c344842ec88edc285fe.jpg b/data/2025/2504_05xxx/2504.05730/images/9ef159d9206396424cc39b0b32aac40dd09c48c5a6fb6c344842ec88edc285fe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2cb85ef6df7eec6f0f729b3064024b6636cca435 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/9ef159d9206396424cc39b0b32aac40dd09c48c5a6fb6c344842ec88edc285fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:007f46eb0a2707360c58f8229617c6e1b3bead4927016eba595922b74f65c7c5 +size 15076 diff --git a/data/2025/2504_05xxx/2504.05730/images/b55451757a74944f442675e8b2b471b2bdc8a5b861025f96cc9df71ed56a04d8.jpg b/data/2025/2504_05xxx/2504.05730/images/b55451757a74944f442675e8b2b471b2bdc8a5b861025f96cc9df71ed56a04d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06cb7c364a7420a89ff70142206e1b3ccfd0e916 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/b55451757a74944f442675e8b2b471b2bdc8a5b861025f96cc9df71ed56a04d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac8109508e97a922e2816ca6cd376b4d76840458672d12faf8dd8e43187d9a93 +size 5040 diff --git a/data/2025/2504_05xxx/2504.05730/images/c149eb9030dbb2180ada8f29499c99c7af59ffbadfa06650df7389e5df9e316a.jpg b/data/2025/2504_05xxx/2504.05730/images/c149eb9030dbb2180ada8f29499c99c7af59ffbadfa06650df7389e5df9e316a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a9d4f7126b23d53ebeb293b875d5184d8b01363 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/c149eb9030dbb2180ada8f29499c99c7af59ffbadfa06650df7389e5df9e316a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e6136972762a3bd13e577b051b4ac55c5ed8de9fa65963fa1666e840877c055 +size 106851 diff --git a/data/2025/2504_05xxx/2504.05730/images/dc85e68a4b71505e6970c9ea8942a4575ade26683aebfa5ef35ab1305ccebad6.jpg b/data/2025/2504_05xxx/2504.05730/images/dc85e68a4b71505e6970c9ea8942a4575ade26683aebfa5ef35ab1305ccebad6.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..402dba0867aa648de50862e53e8674901c777722 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/dc85e68a4b71505e6970c9ea8942a4575ade26683aebfa5ef35ab1305ccebad6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4643364010b3a6fa0f2e3cfabbe6453dd1277e65f9e435ac1c5e5f2402ddc07 +size 4679 diff --git a/data/2025/2504_05xxx/2504.05730/images/e040a9a20613ff2e4aac1a9989029a9c27f0a635e661211182bfd56c180bee23.jpg b/data/2025/2504_05xxx/2504.05730/images/e040a9a20613ff2e4aac1a9989029a9c27f0a635e661211182bfd56c180bee23.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba3d8c1d01265d43651467c5d6d49d62912ef095 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/e040a9a20613ff2e4aac1a9989029a9c27f0a635e661211182bfd56c180bee23.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d81d01e4f92b30b664aa1c7189dcb0149ec3ea7620af9a4c33f287ad66ee9a5f +size 4602 diff --git a/data/2025/2504_05xxx/2504.05730/images/e45b5fc8963a58ca17d8e6b143177c75ae85289ec72817d7d0c1cf393f2e9f9a.jpg b/data/2025/2504_05xxx/2504.05730/images/e45b5fc8963a58ca17d8e6b143177c75ae85289ec72817d7d0c1cf393f2e9f9a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e6eec57a76afed0cfaf3a780234b3e65e11f53a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/e45b5fc8963a58ca17d8e6b143177c75ae85289ec72817d7d0c1cf393f2e9f9a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:327695a3cd78014345cdf677e94f9748541a947d61c9d32e23e87a5c14f06e35 +size 4304 diff --git a/data/2025/2504_05xxx/2504.05730/images/ecce5a0559e48049574371e9b23e1b75329fe824cc739e97fb0a009bfb585c53.jpg b/data/2025/2504_05xxx/2504.05730/images/ecce5a0559e48049574371e9b23e1b75329fe824cc739e97fb0a009bfb585c53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe163afdbf5efad6abd402b74024aaf14c7a5549 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/ecce5a0559e48049574371e9b23e1b75329fe824cc739e97fb0a009bfb585c53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9add1e653727d88920b9a85258c5a2aa1e48fc5b205dbb4c61f50e817417f166 +size 12108 diff --git a/data/2025/2504_05xxx/2504.05730/images/f192efc4432be9c84135b4e47e067d801708b5583a257f3b98c32bb142b4144e.jpg b/data/2025/2504_05xxx/2504.05730/images/f192efc4432be9c84135b4e47e067d801708b5583a257f3b98c32bb142b4144e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d259e562da577f0e178b4cad7ae30a3c77310f7c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/f192efc4432be9c84135b4e47e067d801708b5583a257f3b98c32bb142b4144e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12c0de420f7cd5e768a30b973dffe1b6a7e46a9d96475d034dfa2f70c0c7e0ad +size 4592 diff --git a/data/2025/2504_05xxx/2504.05730/images/fb95df98856241a96ab0dff04372bd6259441931f9571d2e0c69523d6587464d.jpg b/data/2025/2504_05xxx/2504.05730/images/fb95df98856241a96ab0dff04372bd6259441931f9571d2e0c69523d6587464d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8baba5072bca78ae34a68435ff9e032e068962c3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/images/fb95df98856241a96ab0dff04372bd6259441931f9571d2e0c69523d6587464d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77c0d4f89472cb633c7a2d0ec208912ddc0854eb38f11168bab3032d45308c5a +size 17259 diff --git a/data/2025/2504_05xxx/2504.05730/layout.json b/data/2025/2504_05xxx/2504.05730/layout.json new 
file mode 100644 index 0000000000000000000000000000000000000000..ded24f685fb63f2c4fccd8ed422f3d0d0f295a11 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05730/layout.json @@ -0,0 +1,12872 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 113, + 80, + 499, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 80, + 499, + 99 + ], + "spans": [ + { + "bbox": [ + 113, + 80, + 499, + 99 + ], + "type": "text", + "content": "Unified Generative Search and Recommendation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 105, + 164, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 105, + 164, + 118 + ], + "spans": [ + { + "bbox": [ + 118, + 105, + 164, + 118 + ], + "type": "text", + "content": "Teng Shi" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 118, + 200, + 130 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 118, + 200, + 130 + ], + "spans": [ + { + "bbox": [ + 83, + 118, + 200, + 130 + ], + "type": "text", + "content": "Renmin University of China" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 131, + 171, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 131, + 171, + 141 + ], + "spans": [ + { + "bbox": [ + 111, + 131, + 171, + 141 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 100, + 143, + 183, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 143, + 183, + 154 + ], + "spans": [ + { + "bbox": [ + 100, + 143, + 183, + 154 + ], + "type": "text", + "content": "shiteng@ruc.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 285, + 105, + 324, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 105, + 324, + 118 + ], + "spans": [ + { + "bbox": [ + 285, + 105, + 324, + 118 + ], + "type": "text", + "content": "Jun Xu*" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 276, + 119, + 335, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 119, + 335, + 132 + ], + "spans": [ + { + "bbox": [ + 276, + 119, + 335, + 132 + ], + "type": "text", + "content": "Xiao Zhang" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 248, + 133, + 363, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 133, + 363, + 144 + ], + "spans": [ + { + "bbox": [ + 248, + 133, + 363, + 144 + ], + "type": "text", + "content": "Renmin University of China" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 276, + 144, + 335, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 144, + 335, + 156 + ], + "spans": [ + { + "bbox": [ + 276, + 144, + 335, + 156 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 244, + 156, + 366, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 156, + 366, + 167 + ], + "spans": [ + { + "bbox": [ + 244, + 156, + 366, + 167 + ], + "type": "text", + "content": "{junxu,zhangx89}@ruc.edu.cn" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 434, + 105, + 505, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 434, + 105, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 434, + 105, + 505, + 118 + ], + "type": "text", + "content": "Xiaoxue Zang" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 443, + 119, + 496, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 443, + 119, + 496, + 133 + ], + 
"spans": [ + { + "bbox": [ + 443, + 119, + 496, + 133 + ], + "type": "text", + "content": "Kai Zheng" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 407, + 133, + 533, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 407, + 133, + 533, + 144 + ], + "spans": [ + { + "bbox": [ + 407, + 133, + 533, + 144 + ], + "type": "text", + "content": "Kuaishou Technology Co., Ltd." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 440, + 144, + 499, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 440, + 144, + 499, + 156 + ], + "spans": [ + { + "bbox": [ + 440, + 144, + 499, + 156 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 432, + 156, + 507, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 432, + 156, + 507, + 167 + ], + "spans": [ + { + "bbox": [ + 432, + 156, + 507, + 167 + ], + "type": "text", + "content": "xxic666@126.com" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 424, + 168, + 515, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 424, + 168, + 515, + 179 + ], + "spans": [ + { + "bbox": [ + 424, + 168, + 515, + 179 + ], + "type": "text", + "content": "zhengk92@gmail.com" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 196, + 189, + 249, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 189, + 249, + 202 + ], + "spans": [ + { + "bbox": [ + 196, + 189, + 249, + 202 + ], + "type": "text", + "content": "Yang Song" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 160, + 202, + 287, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 202, + 287, + 213 + ], + "spans": [ + { + "bbox": [ + 160, + 202, + 287, + 213 + ], + "type": "text", + "content": "Kuaishou Technology Co., Ltd." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 194, + 214, + 252, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 214, + 252, + 225 + ], + "spans": [ + { + "bbox": [ + 194, + 214, + 252, + 225 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 193, + 226, + 252, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 226, + 252, + 238 + ], + "spans": [ + { + "bbox": [ + 193, + 226, + 252, + 238 + ], + "type": "text", + "content": "ys@sonyis.me" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 362, + 189, + 411, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 189, + 411, + 202 + ], + "spans": [ + { + "bbox": [ + 362, + 189, + 411, + 202 + ], + "type": "text", + "content": "Enyun Yu" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 361, + 202, + 414, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 202, + 414, + 213 + ], + "spans": [ + { + "bbox": [ + 361, + 202, + 414, + 213 + ], + "type": "text", + "content": "Independent" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 358, + 214, + 417, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 214, + 417, + 225 + ], + "spans": [ + { + "bbox": [ + 358, + 214, + 417, + 225 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 347, + 227, + 427, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 227, + 427, + 238 + ], + "spans": [ + { + "bbox": [ + 347, + 227, + 427, + 238 + ], + "type": "text", + "content": "yuenyun@126.com" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 51, + 245, + 96, + 256 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 245, + 96, + 256 + ], + "spans": [ + { + "bbox": [ + 51, + 245, + 96, + 256 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 50, + 259, + 296, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 259, + 296, + 534 + ], + "spans": [ + { + "bbox": [ + 50, + 259, + 296, + 534 + ], + "type": "text", + "content": "Modern commercial platforms typically offer both search and recommendation functionalities to serve diverse user needs, making joint modeling of these tasks an appealing direction. While prior work has shown that integrating search and recommendation can be mutually beneficial, it also reveals a performance trade-off: enhancements in one task often come at the expense of the other. This challenge arises from their distinct information requirements: search emphasizes semantic relevance between queries and items, whereas recommendation depends more on collaborative signals among users and items. Effectively addressing this trade-off requires tackling two key problems: (1) integrating both semantic and collaborative signals into item representations, and (2) guiding the model to distinguish and adapt to the unique demands of search and recommendation. The emergence of generative retrieval with Large Language Models (LLMs) presents new possibilities. This paradigm encodes items as identifiers and frames both search and recommendation as sequential generation tasks, offering the flexibility to leverage multiple identifiers and task-specific prompts. In light of this, we introduce GenSAR, a unified generative framework for balanced search and recommendation. 
Our approach designs dual-purpose identifiers and tailored training strategies to incorporate complementary signals and align with task-specific objectives. Experiments on both public and commercial datasets demonstrate that GenSAR effectively reduces the trade-off and achieves state-of-the-art performance on both tasks." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 51, + 544, + 124, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 544, + 124, + 555 + ], + "spans": [ + { + "bbox": [ + 51, + 544, + 124, + 555 + ], + "type": "text", + "content": "CCS Concepts" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 50, + 558, + 296, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 558, + 296, + 579 + ], + "spans": [ + { + "bbox": [ + 50, + 558, + 296, + 579 + ], + "type": "text", + "content": "- Information systems " + }, + { + "bbox": [ + 50, + 558, + 296, + 579 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 50, + 558, + 296, + 579 + ], + "type": "text", + "content": " Recommender systems; Personalization." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 50, + 585, + 295, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 585, + 295, + 610 + ], + "spans": [ + { + "bbox": [ + 50, + 585, + 295, + 610 + ], + "type": "text", + "content": "*Corresponding authors. Work partially done at Engineering Research Center of Next-Generation Intelligent Search and Recommendation, Ministry of Education. Work done when Teng Shi was the intern at Kuaishou." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 50, + 617, + 294, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 617, + 294, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 617, + 294, + 675 + ], + "type": "text", + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 52, + 676, + 217, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 676, + 217, + 684 + ], + "spans": [ + { + "bbox": [ + 52, + 676, + 217, + 684 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 52, + 685, + 289, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 685, + 289, + 693 + ], + "spans": [ + { + "bbox": [ + 52, + 685, + 289, + 693 + ], + "type": "text", + "content": "© 2018 Copyright held by the owner/author(s). Publication rights licensed to ACM." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 52, + 693, + 162, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 162, + 700 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 162, + 700 + ], + "type": "text", + "content": "ACM ISBN 978-1-4503-XXXX-X/18/06" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 52, + 700, + 162, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 162, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 162, + 709 + ], + "type": "text", + "content": "https://doi.org/XXXXXXXXXXXXXXXXXX" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 315, + 245, + 368, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 245, + 368, + 258 + ], + "spans": [ + { + "bbox": [ + 315, + 245, + 368, + 258 + ], + "type": "text", + "content": "Keywords" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 315, + 260, + 499, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 260, + 499, + 271 + ], + "spans": [ + { + "bbox": [ + 315, + 260, + 499, + 271 + ], + "type": "text", + "content": "Recommendation; Search; Large Language Model" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 315, + 282, + 405, + 291 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 282, + 405, + 291 + ], + "spans": [ + { + "bbox": [ + 315, + 282, + 405, + 291 + ], + "type": "text", + "content": "ACM Reference Format:" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 314, + 292, + 565, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 292, + 565, + 342 + ], + "spans": [ + { + "bbox": [ + 314, + 292, + 565, + 342 + ], + "type": "text", + "content": "Teng Shi, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Yang Song, and Enyun Yu. 2018. Unified Generative Search and Recommendation. In Proceedings of Make sure to enter the correct conference title from your rights confirmation email (Conference acronym 'XX). ACM, New York, NY, USA, 10 pages. https://doi.org/XXXXXXXXX.XXXXXXX" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 315, + 366, + 398, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 366, + 398, + 377 + ], + "spans": [ + { + "bbox": [ + 315, + 366, + 398, + 377 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 313, + 380, + 560, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 380, + 560, + 445 + ], + "spans": [ + { + "bbox": [ + 313, + 380, + 560, + 445 + ], + "type": "text", + "content": "To facilitate the diverse ways of information access, many commercial platforms, such as e-commerce, video, and music platforms, offer both search [2, 3, 6, 7] and recommendation [34, 48-52] (S&R) services. This provides an opportunity for joint modeling of S&R, enabling better user interest modeling and enhancing the performance of both tasks." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 313, + 446, + 560, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 446, + 560, + 578 + ], + "spans": [ + { + "bbox": [ + 313, + 446, + 560, + 578 + ], + "type": "text", + "content": "Many studies have explored joint modeling of S&R, including: leveraging recommendation to enhance search [2, 3, 6, 7], using search to enhance recommendation [15, 30, 31, 37], and unified S&R modeling [29, 41, 43, 46, 47].
Although these studies have demonstrated that S&R can mutually enhance each other, they have also identified a trade-off when the model serves both tasks simultaneously [29]. Specifically, when the recommendation performance improves, the search performance tends to degrade, and vice versa. Empirical analysis of the representative methods of JSR [46] and UniSAR [29] based on an S&R dataset collected from a real commercial platform also confirmed the performance trade-off, as shown in Figure 1(a). For more details, please refer to Section 4.1.1." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 313, + 578, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 578, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 578, + 560, + 710 + ], + "type": "text", + "content": "Analysis also showed that the trade-off is rooted in the different information requirements of S&R. Search typically focuses more on the semantic relevance between queries and items, with traditional search models often based on pre-trained language models [18, 40, 42]. In contrast, recommendation heavily relies on collaborative information, where ID-based recommendation can yield excellent results [14, 19, 44]. Figure 1(b) shows an empirical validation of the S&R performances with ID-only and Text-only embeddings. The ID embeddings are randomly initialized and trained, containing collaborative information, while the Text embeddings are trained with BGE [40] and then reduced to the same dimensionality as that of the ID embeddings, containing semantic
50, + 169, + 295, + 236 + ], + "lines": [ + { + "bbox": [ + 50, + 169, + 295, + 236 + ], + "spans": [ + { + "bbox": [ + 50, + 169, + 295, + 236 + ], + "type": "text", + "content": "Figure 1: Empirical analysis on the Commercial dataset: (a) A trade-off between S&R is observed in representative joint S&R methods, JSR [46] and UniSAR [29]. (b) The performance of the sequential recommendation model SASRec [19] and the product search model QEM [2], using ID and text embeddings, respectively." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 249, + 295, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 249, + 295, + 281 + ], + "spans": [ + { + "bbox": [ + 50, + 249, + 295, + 281 + ], + "type": "text", + "content": "information. From Figure 1(b), we found that recommendation relies more on collaborative information while search focuses more on semantic information." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 282, + 295, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 282, + 295, + 413 + ], + "spans": [ + { + "bbox": [ + 50, + 282, + 295, + 413 + ], + "type": "text", + "content": "Therefore, balancing the semantic information required for search and the collaborative information needed for recommendation becomes a key issue in joint S&R modeling. It is non-trivial and faces two major challenges: (1) How to incorporate both semantic and collaborative information in item representations. Existing joint S&R models typically assign a single representation to each item, making it difficult to capture both types of information effectively; (2) How to let the model understand the difference in information requirements of S&R during training. Current joint models often treat S&R tasks identically, without differentiating them during training. This makes it challenging for the model to grasp their distinct requirements." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 413, + 295, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 413, + 295, + 523 + ], + "spans": [ + { + "bbox": [ + 50, + 413, + 295, + 523 + ], + "type": "text", + "content": "Recently, Large Language Model (LLM) [55]-based generative retrieval for search [35, 59] and recommendation [11, 26, 56] have garnered significant attention. This provides a solution to the aforementioned challenges: (1) Generative retrieval assigns an identifier (a sequence of tokens) to each item, allowing us to assign multiple identifiers to each item to balance semantic and collaborative information; (2) Generative retrieval formulates both S&R as sequence-to-sequence (Seq2Seq) tasks, enabling the unification of different S&R tasks and helping the model better understand the distinct requirements of each task." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 523, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 523, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 523, + 295, + 710 + ], + "type": "text", + "content": "Based on this, we propose GenSAR, which unifies balanced search and recommendation through generative retrieval, thereby alleviating the trade-off between S&R to better enhance each other. Firstly, we design a joint S&R identifier that integrates both semantic and collaborative information. 
Building on the RQ-VAE [26, 56] method, we employ shared codebooks for both semantic and collaborative information, alongside specific codebooks for each. As a result, items from search are represented by semantic codes, while items from recommendation are represented by collaborative codes. These two codes share a common portion to capture shared information while also retaining distinct parts to preserve the unique characteristics of semantic and collaborative information. Secondly, we design the joint S&R training tasks. We prepend a token representing the behavior type to the item identifier and then input the user's S&R history into the LLM (with the user query also provided for search). Different prompts are used to guide LLMs to predict the next recommended item, the next searched query," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 84, + 558, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 84, + 558, + 106 + ], + "spans": [ + { + "bbox": [ + 314, + 84, + 558, + 106 + ], + "type": "text", + "content": "and the next searched item, enabling the model to understand the distinct requirements for S&R." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 106, + 559, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 106, + 559, + 160 + ], + "spans": [ + { + "bbox": [ + 314, + 106, + 559, + 160 + ], + "type": "text", + "content": "The major contributions of the paper are summarized as follows: We verified the existence of the trade-off between S&R, and identified that this trade-off arises from the different information requirements of S&R. Additionally, we have analyzed the challenges in balancing semantic and collaborative information needed for S&R." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 161, + 559, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 161, + 559, + 215 + ], + "spans": [ + { + "bbox": [ + 314, + 161, + 559, + 215 + ], + "type": "text", + "content": "- We propose GenSAR, which unifies balanced S&R through generative retrieval. We designed a joint S&R identifier to balance semantic and collaborative information, and developed joint training tasks to help the model understand the different requirements of each task." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 216, + 558, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 216, + 558, + 249 + ], + "spans": [ + { + "bbox": [ + 314, + 216, + 558, + 249 + ], + "type": "text", + "content": "- Experimental results on two datasets validate the effectiveness of GenSAR. GenSAR not only surpasses traditional S&R models but also outperforms generative S&R models." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 258, + 403, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 258, + 403, + 270 + ], + "spans": [ + { + "bbox": [ + 315, + 258, + 403, + 270 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 276, + 559, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 276, + 559, + 418 + ], + "spans": [ + { + "bbox": [ + 313, + 276, + 559, + 418 + ], + "type": "text", + "content": "Joint Search and Recommendation. 
Joint modeling of S&R has attracted increasing attention in recent years and can be broadly categorized into three types: (1) Enhancing search with recommendation [2, 3, 6, 7], such as TEM [6], which uses Transformers to model user preferences, and CoPPS [7], which applies contrastive learning to address data sparsity. (2) Enhancing recommendation with search [15, 30, 31, 37], e.g., SESRec [31], which disentangles similar and dissimilar interests from both histories. (3) Unified modeling of S&R [29, 41, 43, 46, 47, 53, 54], such as JSR [46, 47] with joint loss and UniSAR [29], which models behavior transitions. While these works show mutual benefits between S&R, they also reveal a trade-off [28, 29]. This paper addresses that trade-off within a generative retrieval framework." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 422, + 559, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 422, + 559, + 597 + ], + "spans": [ + { + "bbox": [ + 313, + 422, + 559, + 597 + ], + "type": "text", + "content": "Generative Search and Recommendation. With the rise of Large Language Models (LLMs) [55], LLM-based generative retrieval has been widely explored for both search [5, 21, 33, 35, 38, 58, 59] and recommendation [11, 17, 25, 26, 56]. These methods represent items as identifiers and input the user query (for search) or user history (for recommendation) into the LLM to generate the target item. Identifier designs can be grouped into: (1) Text-based, using item titles [8, 23] or substrings [5, 22]; (2) Non-learnable ID-based, with early methods assigning random IDs [11], and later ones using clustering to encode semantic or collaborative structure [17, 35, 38]; (3) Learnable codebook-based, applying techniques like RQ-VAE [26, 56] to learn identifiers from semantic or collaborative embeddings. However, most existing approaches design identifiers tailored to either search or recommendation, focusing solely on semantic or collaborative information. In joint S&R, balancing both is essential for strong performance across tasks." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 607, + 406, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 607, + 406, + 620 + ], + "spans": [ + { + "bbox": [ + 315, + 607, + 406, + 620 + ], + "type": "text", + "content": "3 Our Approach" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 622, + 559, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 622, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 622, + 559, + 708 + ], + "type": "text", + "content": "This section introduces our proposed method, GenSAR. Section 3.1 defines the Joint Search and Recommendation task. Section 3.2 presents the Joint Identifier module, where we design separate semantic and collaborative identifiers to balance the different needs of search and recommendation. Section 3.3 describes task-specific training objectives to help the model capture both types of information. Finally, Section 3.4 details the training and inference process of GenSAR." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 224, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 224, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 224, + 69 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "text", + "content": "Teng Shi et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 83, + 545, + 297 + ], + "blocks": [ + { + "bbox": [ + 65, + 83, + 545, + 297 + ], + "lines": [ + { + "bbox": [ + 65, + 83, + 545, + 297 + ], + "spans": [ + { + "bbox": [ + 65, + 83, + 545, + 297 + ], + "type": "image", + "image_path": "757f11261ee325c692b5f00503a37aa68878e51f8dce743a82fe3fffcbf18636.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 300, + 560, + 344 + ], + "lines": [ + { + "bbox": [ + 50, + 300, + 560, + 344 + ], + "spans": [ + { + "bbox": [ + 50, + 300, + 560, + 344 + ], + "type": "text", + "content": "Figure 2: The joint search and recommendation identifier. We extract the semantic and collaborative embeddings for each item. These two embeddings are first concatenated and passed through the shared codebooks to learn shared codes. Then, the semantic and collaborative embeddings are separately processed through specific codebooks to learn specific codes. Finally, these codes are concatenated to form two identifiers for each item: one for semantics and one for collaboration." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 349, + 186, + 360 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 349, + 186, + 360 + ], + "spans": [ + { + "bbox": [ + 51, + 349, + 186, + 360 + ], + "type": "text", + "content": "3.1 Problem Formulation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "spans": [ + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{U},\\mathcal{V},Q" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " denote the sets of users, items, and queries, respectively. 
Each user " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "u\\in \\mathcal{U}" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " has a chronologically ordered interaction history " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "S_{u} = [(b_{1},x_{1}),(b_{2},x_{2}),\\ldots ,(b_{N},x_{N})]" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " that includes her historical S&R behaviors, where " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " denotes the number of " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " 's historical behaviors. " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "b_{i}\\in \\{\\langle \\mathrm{R}_{\\mathrm{I}}\\rangle ,\\langle \\mathrm{S}_{\\mathrm{Q}}\\rangle ,\\langle \\mathrm{S}_{\\mathrm{I}}\\rangle \\}" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " represents the type of the " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " -th behavior: " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "\\langle \\mathrm{R_I}\\rangle" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " indicates an item clicked by the user after a recommendation, " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "\\langle \\mathrm{S_Q}\\rangle" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " represents a query searched by the user, and " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "\\langle \\mathrm{S_I}\\rangle" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " denotes an item clicked by the user after searching a query. " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 363, + 295, + 460 + ], + "type": "text", + "content": " -th behavior:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 460, + 294, + 489 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 460, + 294, + 489 + ], + "spans": [ + { + "bbox": [ + 106, + 460, + 294, + 489 + ], + "type": "interline_equation", + "content": "x _ {i} = \\left\\{ \\begin{array}{l l} v _ {i}, & \\text {i f} b _ {i} = \\langle \\mathrm {R} _ {\\mathrm {I}} \\rangle \\text {o r} b _ {i} = \\langle \\mathrm {S} _ {\\mathrm {I}} \\rangle , \\\\ q _ {i}, & \\text {i f} b _ {i} = \\langle \\mathrm {S} _ {\\mathrm {Q}} \\rangle , \\end{array} \\right. 
\\tag {1}", + "image_path": "4a8c7471cd3f46d74306080e8030939a0f5e5c7b10ca3e1dbbcdd8fc1c650e19.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "spans": [ + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "inline_equation", + "content": "v_{i} \\in \\mathcal{V}" + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "text", + "content": "-th interacted item and " + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "inline_equation", + "content": "q_{i} \\in Q" + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "text", + "content": "-th searched query. Our goal is to enable the model to understand user interests and predict the next item " + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "inline_equation", + "content": "v_{N+1}" + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "text", + "content": " for search when " + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "inline_equation", + "content": "b_{N+1} = \\langle \\mathrm{S}_{\\mathrm{I}} \\rangle" + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "text", + "content": " or recommendation when " + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "inline_equation", + "content": "b_{N+1} = \\langle \\mathrm{R}_{\\mathrm{I}} \\rangle" + }, + { + "bbox": [ + 50, + 491, + 295, + 536 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 544, + 295, + 558 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 544, + 295, + 558 + ], + "spans": [ + { + "bbox": [ + 50, + 544, + 295, + 558 + ], + "type": "text", + "content": "3.2 Joint Search and Recommendation Identifier" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 559, + 295, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 559, + 295, + 637 + ], + "spans": [ + { + "bbox": [ + 50, + 559, + 295, + 637 + ], + "type": "text", + "content": "This section introduces the design of the joint S&R identifier (Figure 2). We first extract semantic and collaborative embeddings for each item. Using RQ-VAE [20, 26, 56], we apply both shared and separate codebooks to learn two identifiers per item—one semantic, one collaborative. The identifiers share common parts to capture shared information, while retaining unique parts to reflect task-specific features." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": "3.2.1 Embedding Extraction. 
For each item " + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "inline_equation", + "content": "v \\in \\mathcal{V}" + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": ", we can input its textual information, such as the title and description, into a pre-trained retrieval model (e.g., BERT [10], BGE [40]) to obtain an embedding " + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_s \\in \\mathbb{R}^{d_s}" + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": " that contains its semantic information. Meanwhile, we can also obtain an embedding " + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_c \\in \\mathbb{R}^{d_c}" + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": " containing its collaborative information from a pre-trained recommendation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 349, + 560, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 349, + 560, + 394 + ], + "spans": [ + { + "bbox": [ + 314, + 349, + 560, + 394 + ], + "type": "text", + "content": "model (e.g., SASRec [19], BERT4Rec [32]). " + }, + { + "bbox": [ + 314, + 349, + 560, + 394 + ], + "type": "inline_equation", + "content": "d_{s}" + }, + { + "bbox": [ + 314, + 349, + 560, + 394 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 349, + 560, + 394 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 314, + 349, + 560, + 394 + ], + "type": "text", + "content": " represent the dimensions of the semantic and collaborative embeddings, respectively. We map the semantic and collaborative embeddings to the same-dimensional latent space using two encoders:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 359, + 413, + 559, + 425 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 413, + 559, + 425 + ], + "spans": [ + { + "bbox": [ + 359, + 413, + 559, + 425 + ], + "type": "interline_equation", + "content": "\\mathbf {z} _ {s} = \\operatorname {E n c o d e r} _ {s} (\\mathbf {v} _ {s}), \\quad \\mathbf {z} _ {c} = \\operatorname {E n c o d e r} _ {c} (\\mathbf {v} _ {c}), \\tag {2}", + "image_path": "b55451757a74944f442675e8b2b471b2bdc8a5b861025f96cc9df71ed56a04d8.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 443, + 560, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 443, + 560, + 477 + ], + "spans": [ + { + "bbox": [ + 313, + 443, + 560, + 477 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 443, + 560, + 477 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_s \\in \\mathbb{R}^d, \\mathbf{z}_c \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 313, + 443, + 560, + 477 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 443, + 560, + 477 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 313, + 443, + 560, + 477 + ], + "type": "text", + "content": " is the dimension of the latent embeddings, " + }, + { + "bbox": [ + 313, + 443, + 560, + 477 + ], + "type": "inline_equation", + "content": "\\mathrm{Encoder}_s(\\cdot)" + }, + { + "bbox": [ + 313, + 443, + 560, + 477 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 443, + 560, + 477 + ], + "type": "inline_equation", + "content": "\\mathrm{Encoder}_c(\\cdot)" + }, + { + "bbox": 
[ + 313, + 443, + 560, + 477 + ], + "type": "text", + "content": " are two MLPs (Multilayer Perceptrons)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "spans": [ + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": "3.2.2 Residual Quantization. To integrate both semantic and collaborative information, we use " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "L_{m}" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": "-level shared codebooks, along with " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "L_{n}" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": "-level specific codebooks for semantic and collaborative information, respectively. First, the latent embeddings for semantic and collaborative information, " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_s" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_c" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": ", are concatenated to form " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_0^m = [\\mathbf{z}_s; \\mathbf{z}_c] \\in \\mathbb{R}^{2d}" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": ". This " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_0^m" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": " is then passed through the " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "L_{m}" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": "-level shared codebooks to obtain the shared codes " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "I_m" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": " and the residual embedding " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_{L_m}^m" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": ". 
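To make Eq. (2) concrete before the residual quantization step, the following is a minimal PyTorch sketch of the two encoders and the concatenation that forms r_0^m; the dimensions, layer widths, and variable names here are illustrative assumptions, not the authors' released implementation.

```python
import torch
import torch.nn as nn

d_s, d_c, d = 768, 64, 32  # assumed dims of v_s, v_c, and the shared latent space

# Encoder_s and Encoder_c in Eq. (2): two small MLPs that map the semantic
# and collaborative embeddings into the same d-dimensional latent space.
encoder_s = nn.Sequential(nn.Linear(d_s, 256), nn.ReLU(), nn.Linear(256, d))
encoder_c = nn.Sequential(nn.Linear(d_c, 256), nn.ReLU(), nn.Linear(256, d))

v_s = torch.randn(1, d_s)  # e.g., a BERT/BGE embedding of the item text
v_c = torch.randn(1, d_c)  # e.g., a SASRec/BERT4Rec item embedding

z_s, z_c = encoder_s(v_s), encoder_c(v_c)
r0_m = torch.cat([z_s, z_c], dim=-1)  # [z_s; z_c] in R^{2d}, fed to the shared codebooks
```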
Then, we extract the semantic part " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_0^s = \\mathbf{r}_{L_m}^m [1:d] \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": " and the collaborative part " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_0^c = \\mathbf{r}_{L_m}^m [d:2d] \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_{L_m}^m" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": ", and input them separately into the semantic and collaborative codebooks to learn their specific codes " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "I_s" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "I_c" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": ", respectively. Finally, the shared and specific codes are concatenated, resulting in two identifiers, " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "I_{m+s}" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "inline_equation", + "content": "I_{m+c}" + }, + { + "bbox": [ + 314, + 491, + 560, + 660 + ], + "type": "text", + "content": ", for each item. Next, we will introduce the residual quantization process for both the shared and specific codebooks." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "text", + "content": "- Shared Codebooks. We have " + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "inline_equation", + "content": "L_{m}" + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "text", + "content": "-level shared codebooks. At each level " + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "inline_equation", + "content": "i \\in \\{1, 2, \\dots, L_{m}\\}" + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "text", + "content": ", we have a shared codebook " + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "inline_equation", + "content": "C_{i}^{m} = \\{\\mathbf{e}_{k}\\}_{k=1}^{K}" + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "text", + "content": " is the size of each codebook and " + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_{k} \\in \\mathbb{R}^{2d}" + }, + { + "bbox": [ + 313, + 663, + 559, + 710 + ], + "type": "text", + "content": " is a learnable code embedding. 
The residual quantization process for the shared" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "type": "text", + "content": "Unified Generative Search and Recommendation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 141, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 141, + 95 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 141, + 95 + ], + "type": "text", + "content": "codebooks is as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 98, + 97, + 246, + 118 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 97, + 246, + 118 + ], + "spans": [ + { + "bbox": [ + 98, + 97, + 246, + 118 + ], + "type": "interline_equation", + "content": "c _ {i} ^ {m} = \underset {k} {\arg \min} | | \mathbf {r} _ {i - 1} ^ {m} - \mathbf {e} _ {k} | | _ {2} ^ {2}, \quad \mathbf {e} _ {k} \in C _ {i} ^ {m},", + "image_path": "f192efc4432be9c84135b4e47e067d801708b5583a257f3b98c32bb142b4144e.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 100, + 118, + 246, + 133 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 118, + 246, + 133 + ], + "spans": [ + { + "bbox": [ + 100, + 118, + 246, + 133 + ], + "type": "interline_equation", + "content": "\mathbf {r} _ {i} ^ {m} = \mathbf {r} _ {i - 1} ^ {m} - \mathbf {e} _ {c _ {i} ^ {m}}, \quad \mathbf {r} _ {0} ^ {m} = [ \mathbf {z} _ {s}; \mathbf {z} _ {c} ] \in \mathbb {R} ^ {2 d}, \tag {3}", + "image_path": "228e08291e1d7b20ac0fd506ea17d1f7057573c3ae0e9a105154addb1aa62c9d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "spans": [ + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "inline_equation", + "content": "c_{i}^{m}" + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "text", + "content": " is the assigned code from the " + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "text", + "content": "-th level of the shared codebook. " + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "inline_equation", + "content": "\mathbf{r}_{i-1}^{m}" + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "text", + "content": " is the residual from the previous level. Through the recursive quantization in Eq. 
(3), we can obtain the shared codes " + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "inline_equation", + "content": "I_{m} = \\left[c_{1}^{m}, c_{2}^{m}, \\ldots, c_{L_{m}}^{m}\\right]" + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "text", + "content": " and the residual embedding " + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_{L_{m}}^{m}" + }, + { + "bbox": [ + 50, + 136, + 295, + 186 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "spans": [ + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "content": "- Specific Codebooks. We can extract the semantic part " + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_0^s = \\mathbf{r}_{L_m}^m [1:d] \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "content": " and the collaborative part " + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_0^c = \\mathbf{r}_{L_m}^m [d:2d] \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "content": " from the residual embedding " + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_{L_m}^m" + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "content": " outputted by the shared codebooks. We then pass them separately through the " + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "inline_equation", + "content": "L_n" + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "content": "-level semantic and collaborative specific codebooks " + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "inline_equation", + "content": "C_i^s" + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "inline_equation", + "content": "C_i^c" + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "inline_equation", + "content": "i \\in \\{1, 2, \\dots, L_n\\}" + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "content": ". Please note that, unlike the shared codebook whose code embeddings are " + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "inline_equation", + "content": "2d" + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "content": "-dimensional, the code embeddings of the specific codebooks are " + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 50, + 188, + 296, + 300 + ], + "type": "text", + "content": "-dimensional. 
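Before formalizing the specific-codebook recursion, the per-level quantization step of Eq. (3) can be sketched as follows; the codebook size, level count, and the NumPy-based implementation are assumptions for illustration only.

```python
import numpy as np

def quantize_level(r_prev: np.ndarray, codebook: np.ndarray):
    """One level of residual quantization (Eq. (3)): assign the nearest
    code embedding, then pass the residual on to the next level."""
    dists = ((codebook - r_prev) ** 2).sum(axis=-1)  # ||r_{i-1} - e_k||_2^2 for all k
    c = int(dists.argmin())                          # assigned code c_i
    return c, r_prev - codebook[c]                   # (c_i, r_i)

K, dim_2d, L_m = 256, 64, 2                 # assumed codebook size, 2d, shared levels
rng = np.random.default_rng(0)
shared_codebooks = [rng.normal(size=(K, dim_2d)) for _ in range(L_m)]

r = rng.normal(size=dim_2d)                 # stands in for r_0^m = [z_s; z_c]
I_m = []
for C in shared_codebooks:                  # recursion over the L_m shared levels
    c, r = quantize_level(r, C)
    I_m.append(c)                           # I_m = [c_1^m, ..., c_{L_m}^m]
# r is now r_{L_m}^m; its halves r[:d] and r[d:] feed the specific codebooks
```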
The residual quantization process for the specific codebooks can be formulated as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 102, + 299, + 242, + 318 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 299, + 242, + 318 + ], + "spans": [ + { + "bbox": [ + 102, + 299, + 242, + 318 + ], + "type": "interline_equation", + "content": "c_{i}^{s} = \operatorname *{arg min}_{k}\left\|\mathbf{r}_{i - 1}^{s} - \mathbf{e}_{k}\right\|_{2}^{2},\quad \mathbf{e}_{k}\in C_{i}^{s},", + "image_path": "7bb6ece7f2d7b36cebf8d7e58ec55a09fb93edc237d58a95e2e8cf47c9a46c11.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 102, + 319, + 294, + 340 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 319, + 294, + 340 + ], + "spans": [ + { + "bbox": [ + 102, + 319, + 294, + 340 + ], + "type": "interline_equation", + "content": "c _ {i} ^ {c} = \underset {k} {\arg \min } \left\| \mathbf {r} _ {i - 1} ^ {c} - \mathbf {e} _ {k} \right\| _ {2} ^ {2}, \quad \mathbf {e} _ {k} \in C _ {i} ^ {c}, \tag {4}", + "image_path": "63f2ec8f4ddeb16ebbdee2423338aaa595f1ec4d49f6298f818e2b331b3d817e.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 102, + 342, + 227, + 355 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 342, + 227, + 355 + ], + "spans": [ + { + "bbox": [ + 102, + 342, + 227, + 355 + ], + "type": "interline_equation", + "content": "\mathbf {r} _ {i} ^ {s} = \mathbf {r} _ {i - 1} ^ {s} - \mathbf {e} _ {c _ {i} ^ {s}}, \quad \mathbf {r} _ {i} ^ {c} = \mathbf {r} _ {i - 1} ^ {c} - \mathbf {e} _ {c _ {i} ^ {c}},", + "image_path": "497db33f26d209bee1ab9f00731f63686f13f15e7147069d77be62270990c6f2.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 356, + 295, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 356, + 295, + 399 + ], + "spans": [ + { + "bbox": [ + 50, + 356, + 295, + 399 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 356, + 295, + 399 + ], + "type": "inline_equation", + "content": "c_i^s" + }, + { + "bbox": [ + 50, + 356, + 295, + 399 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 356, + 295, + 399 + ], + "type": "inline_equation", + "content": "c_i^c" + }, + { + "bbox": [ + 50, + 356, + 295, + 399 + ], + "type": "text", + "content": " represent the codes assigned by the " + }, + { + "bbox": [ + 50, + 356, + 295, + 399 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 356, + 295, + 399 + ], + "type": "text", + "content": "-th level semantic-specific and collaborative-specific codebooks, respectively. Through the recursive quantization in Eq. 
(4), we can obtain the semantic-specific and collaborative-specific codes as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 88, + 403, + 256, + 422 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 403, + 256, + 422 + ], + "spans": [ + { + "bbox": [ + 88, + 403, + 256, + 422 + ], + "type": "interline_equation", + "content": "I _ {s} = \\left[ c _ {1} ^ {s}, c _ {2} ^ {s}, \\dots , c _ {L _ {n}} ^ {s} \\right], \\quad I _ {c} = \\left[ c _ {1} ^ {c}, c _ {2} ^ {c}, \\dots , c _ {L _ {n}} ^ {c} \\right].", + "image_path": "dc85e68a4b71505e6970c9ea8942a4575ade26683aebfa5ef35ab1305ccebad6.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 424, + 295, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 424, + 295, + 456 + ], + "spans": [ + { + "bbox": [ + 50, + 424, + 295, + 456 + ], + "type": "text", + "content": "Finally, by concatenating the shared codes and the specific codes, we can obtain the semantic identifier " + }, + { + "bbox": [ + 50, + 424, + 295, + 456 + ], + "type": "inline_equation", + "content": "I_{m + s}" + }, + { + "bbox": [ + 50, + 424, + 295, + 456 + ], + "type": "text", + "content": " and collaborative identifier " + }, + { + "bbox": [ + 50, + 424, + 295, + 456 + ], + "type": "inline_equation", + "content": "I_{m + c}" + }, + { + "bbox": [ + 50, + 424, + 295, + 456 + ], + "type": "text", + "content": " for item " + }, + { + "bbox": [ + 50, + 424, + 295, + 456 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 50, + 424, + 295, + 456 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 97, + 458, + 247, + 477 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 458, + 247, + 477 + ], + "spans": [ + { + "bbox": [ + 97, + 458, + 247, + 477 + ], + "type": "interline_equation", + "content": "I _ {m + s} = \\left[ c _ {1} ^ {m}, c _ {2} ^ {m}, \\dots , c _ {L _ {m}} ^ {m}, c _ {1} ^ {s}, c _ {2} ^ {s}, \\dots , c _ {L _ {n}} ^ {s} \\right],", + "image_path": "e040a9a20613ff2e4aac1a9989029a9c27f0a635e661211182bfd56c180bee23.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 99, + 478, + 294, + 498 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 478, + 294, + 498 + ], + "spans": [ + { + "bbox": [ + 99, + 478, + 294, + 498 + ], + "type": "interline_equation", + "content": "I _ {m + c} = \\left[ c _ {1} ^ {m}, c _ {2} ^ {m}, \\dots , c _ {L _ {m}} ^ {m}, c _ {1} ^ {c}, c _ {2} ^ {c}, \\dots , c _ {L _ {n}} ^ {c} \\right]. \\tag {5}", + "image_path": "27a565a2cae8a58717a525c0c21642d79e223adc5714f2e704bc946d2a8d0dfd.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 50, + 502, + 295, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 502, + 295, + 534 + ], + "spans": [ + { + "bbox": [ + 50, + 502, + 295, + 534 + ], + "type": "text", + "content": "3.2.3 Identifier Training. 
After passing through the shared and specific codebooks, we can obtain the semantic and collaborative quantized embeddings as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 58, + 537, + 295, + 567 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 537, + 295, + 567 + ], + "spans": [ + { + "bbox": [ + 58, + 537, + 295, + 567 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {z}} _ {s} = \\sum_ {i = 1} ^ {L _ {m}} \\mathbf {e} _ {c _ {i} ^ {m}} [ 1: d ] + \\sum_ {i = 1} ^ {L _ {n}} \\mathbf {e} _ {c _ {i} ^ {s}}, \\quad \\hat {\\mathbf {z}} _ {c} = \\sum_ {i = 1} ^ {L _ {m}} \\mathbf {e} _ {c _ {i} ^ {m}} [ d: 2 d ] + \\sum_ {i = 1} ^ {L _ {n}} \\mathbf {e} _ {c _ {i} ^ {c}}, \\tag {6}", + "image_path": "8b613695057799ce96cafacfb9556faf83ad4ca6cc814befda690d610957dbea.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "spans": [ + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_{c_i^m} \\in \\mathbb{R}^{2d}" + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "text", + "content": " is the code embedding of the shared codebooks, " + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_{c_i^s} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_{c_i^c} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "text", + "content": " are the code embeddings of the semantic and collaborative specific codebooks. 
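Continuing from the where-clause of Eq. (6), a minimal sketch of the quantized-embedding assembly follows; the toy code indices and sizes are assumed purely for illustration.

```python
import numpy as np

d, K, L_m, L_n = 32, 256, 2, 2   # assumed latent dim, codebook size, level counts
rng = np.random.default_rng(0)
shared = [rng.normal(size=(K, 2 * d)) for _ in range(L_m)]  # 2d-dim shared codes
spec_s = [rng.normal(size=(K, d)) for _ in range(L_n)]      # semantic-specific codes
spec_c = [rng.normal(size=(K, d)) for _ in range(L_n)]      # collaborative-specific codes

I_m, I_s, I_c = [3, 17], [41, 8], [52, 37]  # toy code indices for one item

# Eq. (6): shared code embeddings are split into their semantic ([:d]) and
# collaborative ([d:]) halves and summed with the matching specific codes.
z_s_hat = sum(shared[i][I_m[i]][:d] for i in range(L_m)) + \
          sum(spec_s[i][I_s[i]] for i in range(L_n))
z_c_hat = sum(shared[i][I_m[i]][d:] for i in range(L_m)) + \
          sum(spec_c[i][I_c[i]] for i in range(L_n))
# z_s_hat and z_c_hat are then decoded (Eq. (7)) to reconstruct v_s and v_c
```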
The quantized semantic embedding " + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "inline_equation", + "content": "\hat{\mathbf{z}}_s \in \mathbb{R}^d" + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "text", + "content": " and collaborative embedding " + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "inline_equation", + "content": "\hat{\mathbf{z}}_c \in \mathbb{R}^d" + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "text", + "content": " will be used to reconstruct the original semantic and collaborative embeddings, " + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "inline_equation", + "content": "\mathbf{v}_s" + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "inline_equation", + "content": "\mathbf{v}_c" + }, + { + "bbox": [ + 50, + 571, + 295, + 642 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 96, + 646, + 295, + 658 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 646, + 295, + 658 + ], + "spans": [ + { + "bbox": [ + 96, + 646, + 295, + 658 + ], + "type": "interline_equation", + "content": "\hat {\mathbf {v}} _ {s} = \operatorname {Decoder} _ {s} (\hat {\mathbf {z}} _ {s}), \quad \hat {\mathbf {v}} _ {c} = \operatorname {Decoder} _ {c} (\hat {\mathbf {z}} _ {c}), \tag {7}", + "image_path": "37fd8faf0dbc9a27d1f649d989e50bad7c61a13634be58e67730c12d90be365f.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 50, + 662, + 295, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 662, + 295, + 693 + ], + "spans": [ + { + "bbox": [ + 50, + 662, + 295, + 693 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 662, + 295, + 693 + ], + "type": "inline_equation", + "content": "\mathrm{Decoder}_s(\cdot)" + }, + { + "bbox": [ + 50, + 662, + 295, + 693 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 662, + 295, + 693 + ], + "type": "inline_equation", + "content": "\mathrm{Decoder}_c(\cdot)" + }, + { + "bbox": [ + 50, + 662, + 295, + 693 + ], + "type": "text", + "content": " are two MLPs. We can compute the reconstruction loss used for training the encoder and decoder as follows:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 697, + 295, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 697, + 295, + 711 + ], + "spans": [ + { + "bbox": [ + 107, + 697, + 295, + 711 + ], + "type": "interline_equation", + "content": "\mathcal {L} _ {\text {Recon}} = \left\| \mathbf {v} _ {s} - \hat {\mathbf {v}} _ {s} \right\| _ {2} ^ {2} + \left\| \mathbf {v} _ {c} - \hat {\mathbf {v}} _ {c} \right\| _ {2} ^ {2}. 
\\tag {8}", + "image_path": "e45b5fc8963a58ca17d8e6b143177c75ae85289ec72817d7d0c1cf393f2e9f9a.jpg" + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 322, + 83, + 550, + 187 + ], + "blocks": [ + { + "bbox": [ + 322, + 83, + 550, + 187 + ], + "lines": [ + { + "bbox": [ + 322, + 83, + 550, + 187 + ], + "spans": [ + { + "bbox": [ + 322, + 83, + 550, + 187 + ], + "type": "image", + "image_path": "6f6f1922647d9249aee68c7d995b2b82222e7b5d1b014f595f282014536b00fc.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 323, + 196, + 550, + 266 + ], + "blocks": [ + { + "bbox": [ + 323, + 196, + 550, + 266 + ], + "lines": [ + { + "bbox": [ + 323, + 196, + 550, + 266 + ], + "spans": [ + { + "bbox": [ + 323, + 196, + 550, + 266 + ], + "type": "image", + "image_path": "9edd3d9b02c30b3b1ca9e7ca1a76fa2f37f746d23bd32759acf9aefd1337c887.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 270, + 559, + 335 + ], + "lines": [ + { + "bbox": [ + 314, + 270, + 559, + 335 + ], + "spans": [ + { + "bbox": [ + 314, + 270, + 559, + 335 + ], + "type": "text", + "content": "Figure 3: Training and Inference Process of GenSAR. During training, we provide LLM with different instructions to generate corresponding responses. During inference, we append a token at the end of the instruction to indicate the type of behavior to be predicted, enabling the LLM to be applied to either search or recommendation tasks." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 343, + 558, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 343, + 558, + 355 + ], + "spans": [ + { + "bbox": [ + 315, + 343, + 558, + 355 + ], + "type": "text", + "content": "We can also compute the loss for residual quantization as follows:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 339, + 359, + 535, + 389 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 359, + 535, + 389 + ], + "spans": [ + { + "bbox": [ + 339, + 359, + 535, + 389 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {R Q}} ^ {m} = \\sum_ {i = 1} ^ {L _ {m}} | | \\mathrm {s g} [ \\mathbf {r} _ {i - 1} ^ {m} ] - \\mathbf {e} _ {c _ {i} ^ {m}} | | _ {2} ^ {2} + \\alpha | | \\mathbf {r} _ {i - 1} ^ {m} - \\mathrm {s g} [ \\mathbf {e} _ {c _ {i} ^ {m}} ] | | _ {2} ^ {2},", + "image_path": "51bd9a74e03503c42b16c873c60574e952fffb76b5dda5e0c1097b003e3b016e.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 339, + 392, + 558, + 422 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 392, + 558, + 422 + ], + "spans": [ + { + "bbox": [ + 339, + 392, + 558, + 422 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {R Q}} ^ {s} = \\sum_ {i = 1} ^ {L _ {n}} | | \\operatorname {s g} \\left[ \\mathbf {r} _ {i - 1} ^ {s} \\right] - \\mathbf {e} _ {c _ {i} ^ {s}} | | _ {2} ^ {2} + \\alpha | | \\mathbf {r} _ {i - 1} ^ {s} - \\operatorname {s g} \\left[ \\mathbf {e} _ {c _ {i} ^ {s}} \\right] | | _ {2} ^ {2}, \\tag {9}", + "image_path": "8f4fe20c7e4cf1f7fd577a858b8252ea3e484d5051f0b5eefda1ee11d2c4078f.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 339, + 424, + 529, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 424, + 529, + 453 + ], + "spans": [ + { + 
"bbox": [ + 339, + 424, + 529, + 453 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {R Q}} ^ {c} = \\sum_ {i = 1} ^ {L _ {n}} | | \\mathrm {s g} [ \\mathbf {r} _ {i - 1} ^ {c} ] - \\mathbf {e} _ {c _ {i} ^ {c}} | | _ {2} ^ {2} + \\alpha | | \\mathbf {r} _ {i - 1} ^ {c} - \\mathrm {s g} [ \\mathbf {e} _ {c _ {i} ^ {c}} ] | | _ {2} ^ {2},", + "image_path": "95093f04c6644bbe7aecaf5f919e51994d54ed28757e7a3491da4d26fd377e17.jpg" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 340, + 455, + 440, + 469 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 455, + 440, + 469 + ], + "spans": [ + { + "bbox": [ + 340, + 455, + 440, + 469 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {R Q}} = \\mathcal {L} _ {\\mathrm {R Q}} ^ {m} + \\mathcal {L} _ {\\mathrm {R Q}} ^ {s} + \\mathcal {L} _ {\\mathrm {R Q}} ^ {c},", + "image_path": "79113b0abda90b910446c480a05b8133424858f455357ba8d282c5426bd427bb.jpg" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 313, + 474, + 559, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 474, + 559, + 517 + ], + "spans": [ + { + "bbox": [ + 313, + 474, + 559, + 517 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 474, + 559, + 517 + ], + "type": "inline_equation", + "content": "\\mathrm{sg}[\\cdot ]" + }, + { + "bbox": [ + 313, + 474, + 559, + 517 + ], + "type": "text", + "content": " denotes the stop-gradient operation and " + }, + { + "bbox": [ + 313, + 474, + 559, + 517 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 474, + 559, + 517 + ], + "type": "text", + "content": " is a hyperparameter. " + }, + { + "bbox": [ + 313, + 474, + 559, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{RQ}}" + }, + { + "bbox": [ + 313, + 474, + 559, + 517 + ], + "type": "text", + "content": " is used to train the code embeddings in both the shared and specific codebooks. Finally, the total loss for training the identifier is as follows:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 387, + 524, + 558, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 387, + 524, + 558, + 536 + ], + "spans": [ + { + "bbox": [ + 387, + 524, + 558, + 536 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {R Q - V A E}} = \\mathcal {L} _ {\\mathrm {R e c o n}} + \\mathcal {L} _ {\\mathrm {R Q}}. \\tag {10}", + "image_path": "4c0d5a8c826b9a848953e41dba99e0e6c1b976a8c50d336b47c5cec0bf95c750.jpg" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 314, + 541, + 559, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 541, + 559, + 631 + ], + "spans": [ + { + "bbox": [ + 314, + 541, + 559, + 631 + ], + "type": "text", + "content": "3.2.4 Behavior-aware Identifier. After learning the semantic and collaborative identifiers for each item, we can represent each user interaction " + }, + { + "bbox": [ + 314, + 541, + 559, + 631 + ], + "type": "inline_equation", + "content": "(b_{i},x_{i})" + }, + { + "bbox": [ + 314, + 541, + 559, + 631 + ], + "type": "text", + "content": " as shown in Eq. (1). To help the model understand different behaviors in the user's interaction history, we preprocess a token indicating the behavior type to each interaction's identifier. For interactions involving items, we preprocess the corresponding behavior token to the identifier of each item. 
, + { + "bbox": [ + 314, + 541, + 559, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 541, + 559, + 631 + ], + "spans": [ + { + "bbox": [ + 314, + 541, + 559, + 631 + ], + "type": "text", + "content": "3.2.4 Behavior-aware Identifier. After learning the semantic and collaborative identifiers for each item, we can represent each user interaction " + }, + { + "bbox": [ + 314, + 541, + 559, + 631 + ], + "type": "inline_equation", + "content": "(b_{i},x_{i})" + }, + { + "bbox": [ + 314, + 541, + 559, + 631 + ], + "type": "text", + "content": " as shown in Eq. (1). To help the model understand different behaviors in the user's interaction history, we prepend a token indicating the behavior type to each interaction's identifier. For interactions involving items, we prepend the corresponding behavior token to the identifier of each item. For interactions involving queries, we prepend the behavior token to the word sequence of the query. It can be formulated as follows:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 320, + 635, + 558, + 687 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 635, + 558, + 687 + ], + "spans": [ + { + "bbox": [ + 320, + 635, + 558, + 687 + ], + "type": "interline_equation", + "content": "\operatorname {I D} \left(b _ {i}, x _ {i}\right) = \left\{ \begin{array}{l l} {\left[ \langle \mathrm {R} _ {\mathrm {I}} \rangle , c _ {1} ^ {m}, c _ {2} ^ {m}, \dots , c _ {L _ {m}} ^ {m}, c _ {1} ^ {c}, c _ {2} ^ {c}, \dots , c _ {L _ {n}} ^ {c} \right],} & \text {if } b _ {i} = \langle \mathrm {R} _ {\mathrm {I}} \rangle , \\ {\left[ \langle \mathrm {S} _ {\mathrm {Q}} \rangle , w _ {1}, w _ {2}, \dots , w _ {| q _ {i} |} \right],} & \text {if } b _ {i} = \langle \mathrm {S} _ {\mathrm {Q}} \rangle , \\ {\left[ \langle \mathrm {S} _ {\mathrm {I}} \rangle , c _ {1} ^ {m}, c _ {2} ^ {m}, \dots , c _ {L _ {m}} ^ {m}, c _ {1} ^ {s}, c _ {2} ^ {s}, \dots , c _ {L _ {n}} ^ {s} \right],} & \text {if } b _ {i} = \langle \mathrm {S} _ {\mathrm {I}} \rangle , \end{array} \right. \tag {11}", + "image_path": "9ef159d9206396424cc39b0b32aac40dd09c48c5a6fb6c344842ec88edc285fe.jpg" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 315, + 687, + 558, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 687, + 558, + 710 + ], + "spans": [ + { + "bbox": [ + 315, + 687, + 558, + 710 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 315, + 687, + 558, + 710 + ], + "type": "inline_equation", + "content": "\left[w_{1},w_{2},\dots ,w_{|q_{i}|}\right]" + }, + { + "bbox": [ + 315, + 687, + 558, + 710 + ], + "type": "text", + "content": " are the words of query " + }, + { + "bbox": [ + 315, + 687, + 558, + 710 + ], + "type": "inline_equation", + "content": "q_{i}" + }, + { + "bbox": [ + 315, + 687, + 558, + 710 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 315, + 687, + 558, + 710 + ], + "type": "inline_equation", + "content": "\mathrm{ID}(\cdot)" + }, + { + "bbox": [ + 315, + 687, + 558, + 710 + ], + "type": "text", + "content": " denotes the function for obtaining the identifier of each interaction." + } + ] + } + ], + "index": 33 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 225, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 225, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 225, + 69 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "text", + "content": "Teng Shi et al."
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 83, + 294, + 96 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 83, + 294, + 96 + ], + "spans": [ + { + "bbox": [ + 51, + 83, + 294, + 96 + ], + "type": "text", + "content": "3.3 Joint Search and Recommendation Training" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 99, + 295, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 99, + 295, + 131 + ], + "spans": [ + { + "bbox": [ + 50, + 99, + 295, + 131 + ], + "type": "text", + "content": "To better adapt the LLM to joint S&R tasks, we design training objectives that help it understand user behaviors and effectively learn both semantic and collaborative identifiers." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 147, + 296, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 147, + 296, + 236 + ], + "spans": [ + { + "bbox": [ + 50, + 147, + 296, + 236 + ], + "type": "text", + "content": "3.3.1 Next Recommendation Item Prediction. To enable the LLM to perform well on the recommendation task, we let it predict the next recommended item. Unlike previous generative recommendation models [11, 26, 56] that only use the user's recommendation history, our approach incorporates search history as well. This allows the LLM to better leverage the user's historical information and understand the relationship between S&R behaviors. A sample of the data is shown below:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 253, + 212, + 263 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 253, + 212, + 263 + ], + "spans": [ + { + "bbox": [ + 67, + 253, + 212, + 263 + ], + "type": "text", + "content": "Next Recommendation Item Prediction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 272, + 279, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 272, + 279, + 284 + ], + "spans": [ + { + "bbox": [ + 66, + 272, + 279, + 284 + ], + "type": "text", + "content": "Instruction: Below is the user's interaction history: " + }, + { + "bbox": [ + 66, + 272, + 279, + 284 + ], + "type": "inline_equation", + "content": "\langle \mathrm{S_Q}\rangle" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 284, + 279, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 284, + 279, + 294 + ], + "spans": [ + { + "bbox": [ + 67, + 284, + 279, + 294 + ], + "type": "text", + "content": "Piano; " + }, + { + "bbox": [ + 67, + 284, + 279, + 294 + ], + "type": "inline_equation", + "content": "\langle \mathrm{S_I}\rangle <\mathrm{M_{1\_}247}> <\mathrm{M_{2\_}197}> <\mathrm{S_{1\_}184}> <\mathrm{S_{2\_}110}>" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 294, + 279, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 294, + 279, + 305 + ], + "spans": [ + { + "bbox": [ + 67, + 294, + 279, + 305 + ], + "type": "text", + "content": "...; " + }, + { + "bbox": [ + 67, + 294, + 279, + 305 + ], + "type": "inline_equation", + "content": "\langle \mathrm{R_I}\rangle" + }, + { + "bbox": [ + 67, + 294, + 279, + 305 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 306, + 279, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 306, + 279, + 316 + ], + "spans": [ + { + "bbox": [ + 67, + 306, + 279, + 316 + ], + "type": "text", + "content": "Please recommend the next item the user is likely to click." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 317, + 268, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 317, + 268, + 327 + ], + "spans": [ + { + "bbox": [ + 67, + 317, + 268, + 327 + ], + "type": "text", + "content": "Response: " + }, + { + "bbox": [ + 67, + 317, + 268, + 327 + ], + "type": "inline_equation", + "content": "\langle \mathrm{R_I}\rangle <\mathrm{M_{1\_}10}> <\mathrm{M_{2\_}25}> <\mathrm{R_{1\_}52}> <\mathrm{R_{2\_}37}>" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 348, + 295, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 348, + 295, + 392 + ], + "spans": [ + { + "bbox": [ + 50, + 348, + 295, + 392 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 50, + 348, + 295, + 392 + ], + "type": "inline_equation", + "content": "<\mathrm{M_{1\_}10}> <\mathrm{M_{2\_}25}>" + }, + { + "bbox": [ + 50, + 348, + 295, + 392 + ], + "type": "text", + "content": " represents the shared semantic and collaborative identifier of the item, " + }, + { + "bbox": [ + 50, + 348, + 295, + 392 + ], + "type": "inline_equation", + "content": "<\mathrm{S_{1\_}184}> <\mathrm{S_{2\_}110}>" + }, + { + "bbox": [ + 50, + 348, + 295, + 392 + ], + "type": "text", + "content": " represents the semantic-specific identifier, and " + }, + { + "bbox": [ + 50, + 348, + 295, + 392 + ], + "type": "inline_equation", + "content": "<\mathrm{R_{1\_}52}> <\mathrm{R_{2\_}37}>" + }, + { + "bbox": [ + 50, + 348, + 295, + 392 + ], + "type": "text", + "content": " represents the collaborative-specific identifier." + } + ] + } + ], + "index": 11 + }
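To illustrate how such training samples serialize a history into behavior-typed token sequences (following Eq. (11)), here is a minimal sketch; the token spellings (e.g., <R_I>, <M_1_247>) and helper names are assumptions for illustration, not the paper's released tooling.

```python
def interaction_tokens(b, x):
    """Serialize one (b_i, x_i) pair following Eq. (11): a behavior-type
    token, then either item codes or the query's words."""
    if b == "<R_I>":   # recommended item: shared + collaborative-specific codes
        return [b] + x["shared"] + x["collab"]
    if b == "<S_Q>":   # searched query: behavior token + query words
        return [b] + x["words"]
    if b == "<S_I>":   # searched item: shared + semantic-specific codes
        return [b] + x["shared"] + x["semantic"]
    raise ValueError(f"unknown behavior type: {b}")

history = [
    ("<S_Q>", {"words": ["Piano"]}),
    ("<S_I>", {"shared": ["<M_1_247>", "<M_2_197>"],
               "semantic": ["<S_1_184>", "<S_2_110>"]}),
]
flat = [tok for b, x in history for tok in interaction_tokens(b, x)]
prompt = ("Below is the user's interaction history: " + " ".join(flat)
          + ". Please recommend the next item the user is likely to click.")
```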
, + { + "bbox": [ + 50, + 408, + 295, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 408, + 295, + 496 + ], + "spans": [ + { + "bbox": [ + 50, + 408, + 295, + 496 + ], + "type": "text", + "content": "3.3.2 Next Search Query Prediction. Some works focus on query recommendation [4, 12, 39], where they predict the next query a user is likely to search. Since our user interaction history also includes search queries, we introduce a task that allows the LLM to predict the user's next intended search query based on their history. This helps the model better understand user search intent and the relationship between S&R behaviors. A sample of the data for this task is as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 510, + 179, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 510, + 179, + 521 + ], + "spans": [ + { + "bbox": [ + 67, + 510, + 179, + 521 + ], + "type": "text", + "content": "Next Search Query Prediction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 66, + 530, + 279, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 530, + 279, + 542 + ], + "spans": [ + { + "bbox": [ + 66, + 530, + 279, + 542 + ], + "type": "text", + "content": "Instruction: Below is the user's interaction history: " + }, + { + "bbox": [ + 66, + 530, + 279, + 542 + ], + "type": "inline_equation", + "content": "\langle \mathrm{R_I}\rangle" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 66, + 542, + 279, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 542, + 279, + 552 + ], + "spans": [ + { + "bbox": [ + 66, + 542, + 279, + 552 + ], + "type": "inline_equation", + "content": "<\mathrm{M_{1\_}199}>" + }, + { + "bbox": [ + 66, + 542, + 279, + 552 + ], + "type": "inline_equation", + "content": "<\mathrm{M_{2\_}175}>" + }, + { + "bbox": [ + 66, + 542, + 279, + 552 + ], + "type": "inline_equation", + "content": "<\mathrm{R_{1\_}1}>" + }, + { + "bbox": [ + 66, + 542, + 279, + 552 + ], + "type": "inline_equation", + "content": "<\mathrm{R_{2\_}44}>" + }, + { + "bbox": [ + 66, + 542, + 279, + 552 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 66, + 542, + 279, + 552 + ], + "type": "inline_equation", + "content": "\langle \mathrm{R_I}\rangle <\mathrm{M_{1\_}209}>" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 66, + 553, + 279, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 553, + 279, + 563 + ], + "spans": [ + { + "bbox": [ + 66, + 553, + 279, + 563 + ], + "type": "inline_equation", + "content": "<\mathrm{M_{2\_}235}>" + }, + { + "bbox": [ + 66, + 553, + 279, + 563 + ], + "type": "text", + "content": ";...;" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 66, + 564, + 279, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 564, + 279, + 586 + ], + "spans": [ + { + "bbox": [ + 66, + 564, + 279, + 586 + ], + "type": "inline_equation", + "content": "<\mathrm{M_{2\_}68}> <\mathrm{R_{1\_}118}> <\mathrm{R_{2\_}85}>" + }, + { + "bbox": [ + 66, + 564, + 279, + 586 + ], + "type": "text", + "content": ". Please predict the next query the user might want to search." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 586, + 209, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 209, + 597 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 209, + 597 + ], + "type": "text", + "content": "Response: " + }, + { + "bbox": [ + 67, + 586, + 209, + 597 + ], + "type": "inline_equation", + "content": "\langle \mathrm{S_Q}\rangle" + }, + { + "bbox": [ + 67, + 586, + 209, + 597 + ], + "type": "text", + "content": " Artificial Intelligence" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 50, + 622, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 622, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 622, + 295, + 710 + ], + "type": "text", + "content": "3.3.3 Next Search Item Prediction. 
To enable the model to perform well on the search task, we have it predict the next search item. Previous generative search models [35, 59] only input the user's query into the LLM to predict the target item, which considers only the correlation between the query and the item, without taking the user's preferences into account. To address this, we include the user's S&R history in the input to reflect their preferences. A sample of the data for this task is as follows:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 331, + 85, + 437, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 85, + 437, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 85, + 437, + 95 + ], + "type": "text", + "content": "Next Search Item Prediction" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 330, + 104, + 544, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 104, + 544, + 116 + ], + "spans": [ + { + "bbox": [ + 330, + 104, + 544, + 116 + ], + "type": "text", + "content": "Instruction: Below is the user's interaction history: " + }, + { + "bbox": [ + 330, + 104, + 544, + 116 + ], + "type": "inline_equation", + "content": "\langle \mathrm{R_I}\rangle" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 331, + 116, + 544, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 116, + 544, + 126 + ], + "spans": [ + { + "bbox": [ + 331, + 116, + 544, + 126 + ], + "type": "inline_equation", + "content": "<\mathrm{M_{1\_}199}>" + }, + { + "bbox": [ + 331, + 116, + 544, + 126 + ], + "type": "inline_equation", + "content": "<\mathrm{M_{2\_}175}>" + }, + { + "bbox": [ + 331, + 116, + 544, + 126 + ], + "type": "inline_equation", + "content": "<\mathrm{R_{1\_}1}>" + }, + { + "bbox": [ + 331, + 116, + 544, + 126 + ], + "type": "inline_equation", + "content": "<\mathrm{R_{2\_}44}>" + }, + { + "bbox": [ + 331, + 116, + 544, + 126 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 331, + 116, + 544, + 126 + ], + "type": "inline_equation", + "content": "\langle \mathrm{R_I}\rangle <\mathrm{M_{1\_}209}>" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 331, + 127, + 544, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 127, + 544, + 137 + ], + "spans": [ + { + "bbox": [ + 331, + 127, + 544, + 137 + ], + "type": "inline_equation", + "content": "<\mathrm{M_{2\_}235}>" + }, + { + "bbox": [ + 331, + 127, + 544, + 137 + ], + "type": "text", + "content": ";...;" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 331, + 138, + 543, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 138, + 543, + 171 + ], + "spans": [ + { + "bbox": [ + 331, + 138, + 543, + 171 + ], + "type": "inline_equation", + "content": "<\mathrm{M_{2\_}68}> <\mathrm{R_{1\_}118}> <\mathrm{R_{2\_}85}>" + }, + { + "bbox": [ + 331, + 138, + 543, + 171 + ], + "type": "text", + "content": ". The user's search query is " + }, + { + "bbox": [ + 331, + 138, + 543, + 171 + ], + "type": "inline_equation", + "content": "\langle \mathrm{S_Q}\rangle" + }, + { + "bbox": [ + 331, + 138, + 543, + 171 + ], + "type": "text", + "content": " Artificial Intelligence. Please predict the next item the user might click." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 331, + 171, + 534, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 171, + 534, + 182 + ], + "spans": [ + { + "bbox": [ + 331, + 171, + 534, + 182 + ], + "type": "text", + "content": "Response: " + }, + { + "bbox": [ + 331, + 171, + 534, + 182 + ], + "type": "inline_equation", + "content": "\\langle S_I\\rangle < M_{1\\_}23 > < M_{2\\_}42 > < S_{1\\_}126 > < S_{2\\_}73>" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 194, + 558, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 194, + 558, + 217 + ], + "spans": [ + { + "bbox": [ + 314, + 194, + 558, + 217 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 314, + 194, + 558, + 217 + ], + "type": "inline_equation", + "content": "\\langle S_Q\\rangle" + }, + { + "bbox": [ + 314, + 194, + 558, + 217 + ], + "type": "text", + "content": " Artificial Intelligence\" denotes the query that the user is currently searching for." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 222, + 559, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 222, + 559, + 288 + ], + "spans": [ + { + "bbox": [ + 314, + 222, + 559, + 288 + ], + "type": "text", + "content": "3.3.4 Identifier-Language Alignment. To enhance the LLM's understanding of both the collaborative and semantic identifiers of each item, we designed an identifier-language alignment task. This task enables the LLM to generate a corresponding description based on an item's identifier and, conversely, to generate the appropriate identifier from the item's description." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 289, + 559, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 289, + 559, + 311 + ], + "spans": [ + { + "bbox": [ + 314, + 289, + 559, + 311 + ], + "type": "text", + "content": "First, we have the Desc2ID task, which enables the LLM to generate the corresponding item identifier based on its description." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 331, + 319, + 364, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 319, + 364, + 328 + ], + "spans": [ + { + "bbox": [ + 331, + 319, + 364, + 328 + ], + "type": "text", + "content": "Desc2ID" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 330, + 338, + 543, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 338, + 543, + 360 + ], + "spans": [ + { + "bbox": [ + 330, + 338, + 543, + 360 + ], + "type": "text", + "content": "Instruction: Using the provided description \"Apple MacBook Air\", predict the corresponding item." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 331, + 361, + 517, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 361, + 517, + 371 + ], + "spans": [ + { + "bbox": [ + 331, + 361, + 517, + 371 + ], + "type": "text", + "content": "Response: " + }, + { + "bbox": [ + 331, + 361, + 517, + 371 + ], + "type": "inline_equation", + "content": "<\\mathrm{M}_{1-135}> <\\mathrm{M}_{2-19}> <\\mathrm{S}_{1-41}> <\\mathrm{S}_{2-65}>" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 314, + 384, + 559, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 384, + 559, + 407 + ], + "spans": [ + { + "bbox": [ + 314, + 384, + 559, + 407 + ], + "type": "text", + "content": "Then, we have the ID2Desc task, which enables the LLM to generate the corresponding item description based on its identifier." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 331, + 415, + 364, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 415, + 364, + 425 + ], + "spans": [ + { + "bbox": [ + 331, + 415, + 364, + 425 + ], + "type": "text", + "content": "ID2Desc" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 330, + 434, + 543, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 434, + 543, + 456 + ], + "spans": [ + { + "bbox": [ + 330, + 434, + 543, + 456 + ], + "type": "text", + "content": "Instruction: Please provide a description for the item " + }, + { + "bbox": [ + 330, + 434, + 543, + 456 + ], + "type": "inline_equation", + "content": "\\langle \\mathsf{M}_1 - 135\\rangle \\langle \\mathsf{M}_2 - 19\\rangle \\langle \\mathsf{S}_1 - 41\\rangle \\langle \\mathsf{S}_2 - 65\\rangle" + }, + { + "bbox": [ + 330, + 434, + 543, + 456 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 331, + 456, + 451, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 456, + 451, + 467 + ], + "spans": [ + { + "bbox": [ + 331, + 456, + 451, + 467 + ], + "type": "text", + "content": "Response: Apple MacBook Air." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 314, + 480, + 559, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 480, + 559, + 525 + ], + "spans": [ + { + "bbox": [ + 314, + 480, + 559, + 525 + ], + "type": "text", + "content": "Please note that for both semantic and collaborative identifiers, we include the Desc2ID and ID2Desc training tasks. Since the input and output of these two tasks do not involve user history, we do not pretend a token indicating the behavior type to the identifier." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 315, + 533, + 457, + 546 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 533, + 457, + 546 + ], + "spans": [ + { + "bbox": [ + 315, + 533, + 457, + 546 + ], + "type": "text", + "content": "3.4 Training and Inference" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 313, + 548, + 559, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 548, + 559, + 593 + ], + "spans": [ + { + "bbox": [ + 313, + 548, + 559, + 593 + ], + "type": "text", + "content": "This section introduces how to train the LLM for joint S&R, and how to use the trained LLM during inference to generate the target item for either the search or recommendation task. The training and inference process of GenSAR is shown in Figure 3." 
+ } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 314, + 597, + 559, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 597, + 559, + 652 + ], + "spans": [ + { + "bbox": [ + 314, + 597, + 559, + 652 + ], + "type": "text", + "content": "3.4.1 Training. As previously mentioned, each interaction in the user's history is represented as an identifier, allowing us to formulate the task as a sequence-to-sequence problem. We train the model using next token prediction, optimizing the negative log-likelihood of generating the target as follows:" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 382, + 656, + 558, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 382, + 656, + 558, + 685 + ], + "spans": [ + { + "bbox": [ + 382, + 656, + 558, + 685 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = - \\sum_ {t = 1} ^ {T} \\log P \\left(y _ {t} \\mid y _ {< t}, \\text {I n s}\\right). \\tag {12}", + "image_path": "1c7094567f67d298be0fb6a01b3baf82c4ade3cf31f329fbe9471a91e92cf859.jpg" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "content": " represents the behavior-aware identifier of the target to be predicted, as defined in Eq. (11). " + }, + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "content": " is the length of the identifier of" + } + ] + } + ], + "index": 41 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "type": "text", + "content": "Unified Generative Search and Recommendation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 121, + 291, + 212 + ], + "blocks": [ + { + "bbox": [ + 50, + 83, + 297, + 117 + ], + "lines": [ + { + "bbox": [ + 50, + 83, + 297, + 117 + ], + "spans": [ + { + "bbox": [ + 50, + 83, + 297, + 117 + ], + "type": "text", + "content": "Table 1: Comparison of different generative search or recommendation methods. \"S.\" and \"R.\" denote search and recommendation respectively." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 121, + 291, + 212 + ], + "lines": [ + { + "bbox": [ + 53, + 121, + 291, + 212 + ], + "spans": [ + { + "bbox": [ + 53, + 121, + 291, + 212 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Methods</td><td rowspan="2">Scale</td><td rowspan="2">Backbone</td><td colspan="2">Task</td><td colspan="2">Identifier</td></tr>
<tr><td>S.</td><td>R.</td><td>Semantic</td><td>Collaborative</td></tr>
<tr><td>P5 [11, 17]</td><td>60M/220M</td><td>T5-small/T5-base</td><td>✗</td><td>✓</td><td>✗</td><td>✓</td></tr>
<tr><td>TIGER [26]</td><td>60M</td><td>T5-small</td><td>✗</td><td>✓</td><td>✓</td><td>✗</td></tr>
<tr><td>LC-Rec [56]</td><td>7B</td><td>LLaMA</td><td>✗</td><td>✓</td><td>✓</td><td>✗</td></tr>
<tr><td>DSI-QG [59]</td><td>220M</td><td>T5-base</td><td>✓</td><td>✗</td><td>✓</td><td>✗</td></tr>
<tr><td>WebUltron [58]</td><td>220M</td><td>T5-base</td><td>✓</td><td>✗</td><td>✓</td><td>✗</td></tr>
<tr><td>GenRet [33]</td><td>220M</td><td>T5-base</td><td>✓</td><td>✗</td><td>✓</td><td>✗</td></tr>
<tr><td>GenSAR (Ours)</td><td>60M</td><td>T5-small</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr>
", + "image_path": "75c269323e7a69b60fb9e7112ba8de4ac044bc997681bbbf5853b88f1e5eda9c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 219, + 295, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 219, + 295, + 243 + ], + "spans": [ + { + "bbox": [ + 50, + 219, + 295, + 243 + ], + "type": "text", + "content": "the target item. Ins refers to the various instructions described in Section 3.3, which are used as inputs for the LLM." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 247, + 295, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 247, + 295, + 368 + ], + "spans": [ + { + "bbox": [ + 50, + 247, + 295, + 368 + ], + "type": "text", + "content": "3.4.2 Inference. During training, we train the LLM according to the input-output format described in Section 3.3. During inference, to apply the LLM to search and recommendation tasks, we append a behavior token, either “" + }, + { + "bbox": [ + 50, + 247, + 295, + 368 + ], + "type": "inline_equation", + "content": "\\langle S_I \\rangle" + }, + { + "bbox": [ + 50, + 247, + 295, + 368 + ], + "type": "text", + "content": "” for search or “" + }, + { + "bbox": [ + 50, + 247, + 295, + 368 + ], + "type": "inline_equation", + "content": "\\langle R_I \\rangle" + }, + { + "bbox": [ + 50, + 247, + 295, + 368 + ], + "type": "text", + "content": "” for recommendation, to the input of the LLM to prompt it to generate the corresponding next item for search or recommendation, respectively. The other tasks mentioned in Section 3.3 are used as auxiliary tasks during training to help the model better understand user S&R behaviors. During generation, to ensure that the items generated by the LLM are within the candidate set, we follow previous works [17, 56] and use constrained beam search." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 377, + 133, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 377, + 133, + 388 + ], + "spans": [ + { + "bbox": [ + 51, + 377, + 133, + 388 + ], + "type": "text", + "content": "3.5 Discussion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 392, + 295, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 392, + 295, + 468 + ], + "spans": [ + { + "bbox": [ + 50, + 392, + 295, + 468 + ], + "type": "text", + "content": "As shown in Table 1, we compare GenSAR with various generative search or recommendation methods in terms of scale (number of parameters), backbone architecture used, and applicable tasks. GenSAR adopts T5-small as its backbone, resulting in a relatively small number of parameters while being capable of serving both S&R tasks. Compared with existing methods, it achieves an optimal balance between efficiency and effectiveness." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 469, + 295, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 469, + 295, + 524 + ], + "spans": [ + { + "bbox": [ + 50, + 469, + 295, + 524 + ], + "type": "text", + "content": "In terms of novelty, unlike existing methods that focus solely on either semantic or collaborative information in identifier design, our approach incorporates both the semantic information required for search and the collaborative signals essential for recommendation. This joint consideration helps alleviate the trade-off between S&R." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 533, + 134, + 545 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 533, + 134, + 545 + ], + "spans": [ + { + "bbox": [ + 51, + 533, + 134, + 545 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 548, + 295, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 548, + 295, + 559 + ], + "spans": [ + { + "bbox": [ + 50, + 548, + 295, + 559 + ], + "type": "text", + "content": "We conducted experiments to evaluate the performance of GenSAR." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 568, + 178, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 568, + 178, + 582 + ], + "spans": [ + { + "bbox": [ + 51, + 568, + 178, + 582 + ], + "type": "text", + "content": "4.1 Experimental Setup" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 583, + 301, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 583, + 301, + 661 + ], + "spans": [ + { + "bbox": [ + 50, + 583, + 301, + 661 + ], + "type": "text", + "content": "4.1.1 Dataset. We conducted experiments on the following datasets: (1) Amazon" + }, + { + "bbox": [ + 50, + 583, + 301, + 661 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 50, + 583, + 301, + 661 + ], + "type": "text", + "content": " [13, 24]: Following previous works [2, 3, 29, 31], we use the semi-synthetic dataset based on Amazon recommendation data as the public dataset for our experiments." + }, + { + "bbox": [ + 50, + 583, + 301, + 661 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 50, + 583, + 301, + 661 + ], + "type": "text", + "content": " (2) Commercial: To thoroughly evaluate the effectiveness of GenSAR, we collected a dataset from a Chinese commercial app, containing S&R interactions from 10,000 users over two weeks. For details on data" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 318, + 109, + 555, + 147 + ], + "blocks": [ + { + "bbox": [ + 314, + 83, + 558, + 105 + ], + "lines": [ + { + "bbox": [ + 314, + 83, + 558, + 105 + ], + "spans": [ + { + "bbox": [ + 314, + 83, + 558, + 105 + ], + "type": "text", + "content": "Table 2: Statistics of the datasets used in this paper. \"S\" and \"R\" denote search and recommendation, respectively." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 109, + 555, + 147 + ], + "lines": [ + { + "bbox": [ + 318, + 109, + 555, + 147 + ], + "spans": [ + { + "bbox": [ + 318, + 109, + 555, + 147 + ], + "type": "table", + "html": "
<tr><td>Dataset</td><td>#Users</td><td>#Items</td><td>#Queries</td><td>#Interaction-R</td><td>#Interaction-S</td></tr>
<tr><td>Amazon</td><td>192,403</td><td>62,883</td><td>983</td><td>1,266,903</td><td>1,081,934</td></tr>
<tr><td>Commercial</td><td>10,000</td><td>782,225</td><td>135,206</td><td>4,286,866</td><td>383,465</td></tr>
", + "image_path": "2492eabca5dc294d37215f26e8b18bb0004c6adcb54dc3e00c8d92abb5dbddc4.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 169, + 558, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 169, + 558, + 190 + ], + "spans": [ + { + "bbox": [ + 314, + 169, + 558, + 190 + ], + "type": "text", + "content": "processing and train/validation/test splitting, please see the code link." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 201, + 558, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 201, + 558, + 222 + ], + "spans": [ + { + "bbox": [ + 314, + 201, + 558, + 222 + ], + "type": "text", + "content": "4.1.2 Baselines. In this work, we use the following representative methods as baselines for comparison with GenSAR." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 223, + 560, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 223, + 560, + 333 + ], + "spans": [ + { + "bbox": [ + 313, + 223, + 560, + 333 + ], + "type": "text", + "content": "First, we compare with the following recommendation models: (1) Sequential Recommendation: GRU4Rec [16]; SASRec [19]; FMLP-Rec [57]; LRURec [45]. (2) Generative Recommendation: P5-CID [11, 17]; TIGER [26]; LC-Rec [56]. Next, we compare with the following search models: (1) Personalized Search: QEM [2]; TEM [6]; CoPPS [7]. (2) Dense Retrieval: E5³ [36]; BGE⁴ [40]. (3) Generative Retrieval: DSI-QG [59]; WebUltron [58]; GenRet [33]. Finally, we compare with the following joint S&R models: JSR [46]; SESRec [31]; UnifiedSSR [41]; UniSAR [29]. For more details on the baselines, please see the code link." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "spans": [ + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "type": "text", + "content": "4.1.3 Evaluation Metrics & Implementation Details. Following previous works [29, 31, 57], we use ranking metrics including top-" + }, + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "type": "text", + "content": " Hit Ratio (HR) and top-" + }, + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "type": "text", + "content": " Normalized Discounted Cumulative Gain (NDCG). We report the results for " + }, + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "type": "text", + "content": " values of " + }, + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "type": "inline_equation", + "content": "\\{1, 5, 10\\}" + }, + { + "bbox": [ + 314, + 342, + 560, + 418 + ], + "type": "text", + "content": ", and since NDCG@1 is the same as HR@1, we do not report it. For more details on the evaluation and model implementation, please see the code link." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 432, + 447, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 432, + 447, + 443 + ], + "spans": [ + { + "bbox": [ + 315, + 432, + 447, + 443 + ], + "type": "text", + "content": "4.2 Overall Performance" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 447, + 560, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 447, + 560, + 468 + ], + "spans": [ + { + "bbox": [ + 314, + 447, + 560, + 468 + ], + "type": "text", + "content": "Table 3 and Table 4 show the S&R results on two datasets, respectively. From the results, we can observe that:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 469, + 560, + 655 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 314, + 469, + 560, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 469, + 560, + 523 + ], + "spans": [ + { + "bbox": [ + 314, + 469, + 560, + 523 + ], + "type": "text", + "content": "- Firstly, it can be seen that compared to existing search or recommendation models, GenSAR achieves state-of-the-art results. This validates the effectiveness of GenSAR in alleviating the trade-off between S&R through generative retrieval, by designing joint identifiers and training tasks for both tasks." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 523, + 560, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 523, + 560, + 589 + ], + "spans": [ + { + "bbox": [ + 314, + 523, + 560, + 589 + ], + "type": "text", + "content": "- Secondly, we can observe that most joint S&R methods (e.g., JSR, UniSAR, GenSAR) outperform traditional methods that using only item IDs, such as sequential recommendation (e.g., SASRec, FMLP-Rec) and personalized search methods (e.g., QEM, TEM, CoPPS). This demonstrates the advantages of jointly modeling of S&R, as it enhances the performance of both tasks." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 590, + 560, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 590, + 560, + 655 + ], + "spans": [ + { + "bbox": [ + 314, + 590, + 560, + 655 + ], + "type": "text", + "content": "- Thirdly, it can be observed that for search, dense retrieval (e.g., E5, BGE) and generative retrieval (e.g., GenRet, GenSAR) methods that rely on semantic information outperform personalized search models (e.g., QEM, TEM, CoPPS) that rely solely on ID information. This also confirms that for search, semantic information is more important than collaborative information." + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 225, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 225, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 225, + 69 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "text", + "content": "Teng Shi et al." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 666, + 295, + 685 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 666, + 295, + 685 + ], + "spans": [ + { + "bbox": [ + 50, + 666, + 295, + 685 + ], + "type": "text", + "content": "1https://cseweb.ucsd.edu/~jmcauley/datasets/amazon/links.html, https://github.com/QingyaoAi/Amazon-Product-Search-Datasets" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 685, + 294, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 685, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 50, + 685, + 294, + 709 + ], + "type": "text", + "content": "2Please note that " + }, + { + "bbox": [ + 50, + 685, + 294, + 709 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 50, + 685, + 294, + 709 + ], + "type": "text", + "content": " of the items in the \"Kindle Store\" subset used in previous works [29, 31] lack textual information, so we use the \"Electronics\" subset, where less than " + }, + { + "bbox": [ + 50, + 685, + 294, + 709 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 50, + 685, + 294, + 709 + ], + "type": "text", + "content": " of the items lack text." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 682, + 559, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 682, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 314, + 682, + 559, + 709 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 314, + 682, + 559, + 709 + ], + "type": "text", + "content": "https://huggingface.co/intfloat/multilingual-e5-base \n" + }, + { + "bbox": [ + 314, + 682, + 559, + 709 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 314, + 682, + 559, + 709 + ], + "type": "text", + "content": "https://huggingface.co/BAAI/bge-base-en-v1.5, https://huggingface.co/BAAI/bge-base-zh-v1.5" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 134, + 552, + 259 + ], + "blocks": [ + { + "bbox": [ + 50, + 82, + 560, + 127 + ], + "lines": [ + { + "bbox": [ + 50, + 82, + 560, + 127 + ], + "spans": [ + { + "bbox": [ + 50, + 82, + 560, + 127 + ], + "type": "text", + "content": "Table 3: The recommendation performance of different methods on the two datasets. The best and the second-best methods are highlighted in bold and underlined fonts, respectively. The improvements over the second-best methods are statistically significant (t-test, " + }, + { + "bbox": [ + 50, + 82, + 560, + 127 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 50, + 82, + 560, + 127 + ], + "type": "text", + "content": "-value " + }, + { + "bbox": [ + 50, + 82, + 560, + 127 + ], + "type": "inline_equation", + "content": "< 0.05" + }, + { + "bbox": [ + 50, + 82, + 560, + 127 + ], + "type": "text", + "content": "). Following commonly used settings [29, 31, 57], we pair the ground-truth item with 99 randomly sampled items that the user has not interacted with to form the candidate list." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 134, + 552, + 259 + ], + "lines": [ + { + "bbox": [ + 56, + 134, + 552, + 259 + ], + "spans": [ + { + "bbox": [ + 56, + 134, + 552, + 259 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Datasets</td><td rowspan="2">Metrics</td><td colspan="7">Recommendation</td><td colspan="5">Joint Search and Recommendation</td></tr>
<tr><td>GRU4Rec</td><td>SASRec</td><td>FMLP-Rec</td><td>LRURec</td><td>P5-CID</td><td>TIGER</td><td>LC-Rec</td><td>JSR</td><td>SESRec</td><td>UnifiedSSR</td><td>UniSAR</td><td>GenSAR</td></tr>
<tr><td rowspan="5">Amazon</td><td>HR@1</td><td>0.0440</td><td>0.0544</td><td>0.0534</td><td>0.0544</td><td>0.0881</td><td>0.1073</td><td>0.1063</td><td>0.0657</td><td>0.0627</td><td>0.0477</td><td>0.0680</td><td>0.1261</td></tr>
<tr><td>HR@5</td><td>0.1716</td><td>0.1887</td><td>0.1898</td><td>0.1890</td><td>0.1874</td><td>0.2046</td><td>0.1973</td><td>0.2075</td><td>0.2083</td><td>0.1667</td><td>0.2171</td><td>0.2228</td></tr>
<tr><td>HR@10</td><td>0.2884</td><td>0.2992</td><td>0.3041</td><td>0.3001</td><td>0.2790</td><td>0.2852</td><td>0.2760</td><td>0.3188</td><td>0.3209</td><td>0.2707</td><td>0.3319</td><td>0.3063</td></tr>
<tr><td>NDCG@5</td><td>0.1074</td><td>0.1216</td><td>0.1217</td><td>0.1218</td><td>0.1380</td><td>0.1565</td><td>0.1522</td><td>0.1371</td><td>0.1359</td><td>0.1071</td><td>0.1432</td><td>0.1748</td></tr>
<tr><td>NDCG@10</td><td>0.1449</td><td>0.1571</td><td>0.1584</td><td>0.1575</td><td>0.1674</td><td>0.1824</td><td>0.1774</td><td>0.1729</td><td>0.1721</td><td>0.1405</td><td>0.1802</td><td>0.2015</td></tr>
<tr><td rowspan="5">Commercial</td><td>HR@1</td><td>0.1022</td><td>0.1519</td><td>0.1442</td><td>0.1363</td><td>0.2843</td><td>0.2630</td><td>0.2703</td><td>0.1576</td><td>0.1890</td><td>0.1515</td><td>0.2214</td><td>0.2997</td></tr>
<tr><td>HR@5</td><td>0.2526</td><td>0.2812</td><td>0.2711</td><td>0.2637</td><td>0.3305</td><td>0.3013</td><td>0.3001</td><td>0.2685</td><td>0.2845</td><td>0.2844</td><td>0.3228</td><td>0.3496</td></tr>
<tr><td>HR@10</td><td>0.3527</td><td>0.3716</td><td>0.3584</td><td>0.3525</td><td>0.3830</td><td>0.3448</td><td>0.3333</td><td>0.3529</td><td>0.3690</td><td>0.3870</td><td>0.4056</td><td>0.4031</td></tr>
<tr><td>NDCG@5</td><td>0.1787</td><td>0.2179</td><td>0.2093</td><td>0.2021</td><td>0.3072</td><td>0.2819</td><td>0.2849</td><td>0.2142</td><td>0.2370</td><td>0.2195</td><td>0.2727</td><td>0.3241</td></tr>
<tr><td>NDCG@10</td><td>0.2110</td><td>0.2470</td><td>0.2373</td><td>0.2306</td><td>0.3240</td><td>0.2958</td><td>0.2955</td><td>0.2413</td><td>0.2641</td><td>0.2524</td><td>0.2993</td><td>0.3411</td></tr>
", + "image_path": "76915c600d59c0426bebb18d152b3f3340735ab970484cea05e5b3b7c1a13978.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 56, + 313, + 552, + 443 + ], + "blocks": [ + { + "bbox": [ + 50, + 261, + 560, + 307 + ], + "lines": [ + { + "bbox": [ + 50, + 261, + 560, + 307 + ], + "spans": [ + { + "bbox": [ + 50, + 261, + 560, + 307 + ], + "type": "text", + "content": "Table 4: The search performance of different methods on the two datasets. Since search relies on semantic relevance, previous works [29, 41] that randomly sample negatives often produce overly easy examples, leading to inflated performance and poor model differentiation. To address this, we follow prior personalized search methods [1, 9] and use BM25 [27] to retrieve 99 harder negatives, forming a candidate list with the positive sample for more accurate evaluation." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 313, + 552, + 443 + ], + "lines": [ + { + "bbox": [ + 56, + 313, + 552, + 443 + ], + "spans": [ + { + "bbox": [ + 56, + 313, + 552, + 443 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Datasets</td><td rowspan="2">Metrics</td><td colspan="8">Search</td><td colspan="4">Joint Search and Recommendation</td></tr>
<tr><td>QEM</td><td>TEM</td><td>CoPPS</td><td>E5</td><td>BGE</td><td>DSI-QG</td><td>WebUltron</td><td>GenRet</td><td>JSR</td><td>UnifiedSSR</td><td>UniSAR</td><td>GenSAR</td></tr>
<tr><td rowspan="5">Amazon</td><td>HR@1</td><td>0.1512</td><td>0.0839</td><td>0.0943</td><td>0.3289</td><td>0.4030</td><td>0.3558</td><td>0.3432</td><td>0.4173</td><td>0.0835</td><td>0.0799</td><td>0.1122</td><td>0.5262</td></tr>
<tr><td>HR@5</td><td>0.3101</td><td>0.3471</td><td>0.3380</td><td>0.5945</td><td>0.6264</td><td>0.5848</td><td>0.5464</td><td>0.6513</td><td>0.2407</td><td>0.2476</td><td>0.3129</td><td>0.7529</td></tr>
<tr><td>HR@10</td><td>0.4657</td><td>0.5181</td><td>0.4909</td><td>0.7203</td><td>0.7475</td><td>0.6897</td><td>0.6216</td><td>0.7339</td><td>0.3463</td><td>0.3614</td><td>0.4333</td><td>0.8217</td></tr>
<tr><td>NDCG@5</td><td>0.2311</td><td>0.2173</td><td>0.2154</td><td>0.4662</td><td>0.5219</td><td>0.4764</td><td>0.4507</td><td>0.5399</td><td>0.1623</td><td>0.1662</td><td>0.2143</td><td>0.6485</td></tr>
<tr><td>NDCG@10</td><td>0.2809</td><td>0.2722</td><td>0.2647</td><td>0.5069</td><td>0.5613</td><td>0.5103</td><td>0.4748</td><td>0.5667</td><td>0.1962</td><td>0.2028</td><td>0.2533</td><td>0.6710</td></tr>
<tr><td rowspan="5">Commercial</td><td>HR@1</td><td>0.0311</td><td>0.0328</td><td>0.0265</td><td>0.1277</td><td>0.1267</td><td>0.1016</td><td>0.0804</td><td>0.1171</td><td>0.0273</td><td>0.0119</td><td>0.0511</td><td>0.1249</td></tr>
<tr><td>HR@5</td><td>0.0870</td><td>0.1106</td><td>0.0998</td><td>0.3108</td><td>0.3184</td><td>0.2831</td><td>0.2619</td><td>0.3320</td><td>0.1202</td><td>0.0470</td><td>0.1810</td><td>0.3655</td></tr>
<tr><td>HR@10</td><td>0.1539</td><td>0.1925</td><td>0.1792</td><td>0.4044</td><td>0.4194</td><td>0.4132</td><td>0.3992</td><td>0.4666</td><td>0.2137</td><td>0.0873</td><td>0.3231</td><td>0.5250</td></tr>
<tr><td>NDCG@5</td><td>0.0586</td><td>0.0715</td><td>0.0626</td><td>0.2230</td><td>0.2258</td><td>0.1940</td><td>0.1721</td><td>0.2273</td><td>0.0728</td><td>0.0292</td><td>0.1144</td><td>0.2472</td></tr>
<tr><td>NDCG@10</td><td>0.0799</td><td>0.0977</td><td>0.0880</td><td>0.2533</td><td>0.2584</td><td>0.2359</td><td>0.2164</td><td>0.2708</td><td>0.1026</td><td>0.0420</td><td>0.1597</td><td>0.2987</td></tr>
", + "image_path": "c149eb9030dbb2180ada8f29499c99c7af59ffbadfa06650df7389e5df9e316a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 449, + 153, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 449, + 153, + 462 + ], + "spans": [ + { + "bbox": [ + 51, + 449, + 153, + 462 + ], + "type": "text", + "content": "4.3 Ablation Study" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 464, + 295, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 464, + 295, + 496 + ], + "spans": [ + { + "bbox": [ + 50, + 464, + 295, + 496 + ], + "type": "text", + "content": "We conducted ablation study on the Commercial dataset to validate the effectiveness of the various training tasks in GenSAR, as shown in Table 5." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 497, + 295, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 497, + 295, + 583 + ], + "spans": [ + { + "bbox": [ + 50, + 497, + 295, + 583 + ], + "type": "text", + "content": "Impact of Behavior Token. As shown in Section 3.2.4, we pretended a token indicating the type of behavior to be an identifier of each user interaction, enabling the LLM to recognize different behavior types. To evaluate its impact, we removed this behavior token, as shown in Table 5 (\"w/o Behavior Token\"). The results indicate that removing the behavior token degrades performance, validating that adding this token helps the LLM better understand the relationship between user S&R behaviors." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 585, + 295, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 585, + 295, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 585, + 295, + 704 + ], + "type": "text", + "content": "Next Recommendation Item Prediction (NRIP). As shown in Section 3.3.1, we incorporated the training task \"Next Recommendation Item Prediction\" (NRIP), which enables the LLM to predict the next item to recommend based on user history. To evaluate its impact, we removed this task, as shown in Table 5 (\"w/o NRIP\"). The results demonstrate that removing this task significantly degrades recommendation performance and slightly reduces search performance, highlighting the importance of NRIP. Additionally, this demonstrates that recommendation training tasks can enhance search performance, verifying that recommendation can benefit search." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 450, + 559, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 450, + 559, + 548 + ], + "spans": [ + { + "bbox": [ + 313, + 450, + 559, + 548 + ], + "type": "text", + "content": "Next Search Query Prediction (NSQP). We included the training task \"Next Search Query Prediction\" (NSQP) to enable the LLM to better understand user intent by predicting the next query a user might want to search, as described in Section 3.3.2. To evaluate its impact, we observed the results after removing this task, as shown in Table 5 (\"w/o NSQP\"). The results indicate that removing this task significantly degrades search performance and also affects recommendation performance, demonstrating that NSQP helps the model better understand user search intent." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 549, + 559, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 549, + 559, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 549, + 559, + 658 + ], + "type": "text", + "content": "Next Search Item Prediction (NSIP). In Section 3.3.3, we introduced the training task \"Next Search Item Prediction\" (NSIP), which allows the LLM to predict the next item a user might click based on their history and input query. We analyzed the impact of this task, as shown in Table 5 (\"w/o NSIP\"). The results indicate that removing this task significantly degrades search performance, while also slightly affecting recommendation performance. This demonstrates the importance of NSIP for search and further highlights that search training tasks can enhance recommendation performance, validating that search can assist recommendation." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 658, + 559, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 658, + 559, + 702 + ], + "spans": [ + { + "bbox": [ + 313, + 658, + 559, + 702 + ], + "type": "text", + "content": "Identifier-Language Alignment. In Section 3.3.4, we introduced two tasks, Desc2ID and ID2Desc, for identifier-language alignment, which help the LLM better understand the semantic and collaborative identifiers of each item. We observed the impact of" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "type": "text", + "content": "Unified Generative Search and Recommendation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 118, + 287, + 247 + ], + "blocks": [ + { + "bbox": [ + 50, + 83, + 294, + 114 + ], + "lines": [ + { + "bbox": [ + 50, + 83, + 294, + 114 + ], + "spans": [ + { + "bbox": [ + 50, + 83, + 294, + 114 + ], + "type": "text", + "content": "Table 5: Ablation study on the Commercial dataset, where \"w/o\" denotes the removal of the corresponding module in GenSAR." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 118, + 287, + 247 + ], + "lines": [ + { + "bbox": [ + 58, + 118, + 287, + 247 + ], + "spans": [ + { + "bbox": [ + 58, + 118, + 287, + 247 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Model</td><td colspan="2">Recommendation</td><td colspan="2">Search</td></tr>
<tr><td>HR@5</td><td>NDCG@5</td><td>HR@5</td><td>NDCG@5</td></tr>
<tr><td>GenSAR</td><td>0.3496</td><td>0.3241</td><td>0.3655</td><td>0.2472</td></tr>
<tr><td>w/o Behavior Token</td><td>0.3430</td><td>0.3193</td><td>0.3298</td><td>0.2224</td></tr>
<tr><td>w/o NRIP</td><td>0.0665</td><td>0.0392</td><td>0.3456</td><td>0.2342</td></tr>
<tr><td>w/o NSQP</td><td>0.3401</td><td>0.3163</td><td>0.3089</td><td>0.2053</td></tr>
<tr><td>w/o NSIP</td><td>0.3390</td><td>0.3152</td><td>0.1668</td><td>0.1113</td></tr>
<tr><td>w/o Desc2ID</td><td>0.3416</td><td>0.3188</td><td>0.3355</td><td>0.2278</td></tr>
<tr><td>w/o ID2Desc</td><td>0.3458</td><td>0.3220</td><td>0.3398</td><td>0.2308</td></tr>
", + "image_path": "852f49bc31d0e7f4e1eea1e2c9a018de3829e9f64126eff2bd75879bf40fcc2b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 55, + 248, + 169, + 319 + ], + "blocks": [ + { + "bbox": [ + 55, + 248, + 169, + 319 + ], + "lines": [ + { + "bbox": [ + 55, + 248, + 169, + 319 + ], + "spans": [ + { + "bbox": [ + 55, + 248, + 169, + 319 + ], + "type": "image", + "image_path": "ecce5a0559e48049574371e9b23e1b75329fe824cc739e97fb0a009bfb585c53.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 63, + 323, + 162, + 331 + ], + "lines": [ + { + "bbox": [ + 63, + 323, + 162, + 331 + ], + "spans": [ + { + "bbox": [ + 63, + 323, + 162, + 331 + ], + "type": "text", + "content": "(a) Recommendation Performance" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 175, + 249, + 290, + 319 + ], + "blocks": [ + { + "bbox": [ + 175, + 249, + 290, + 319 + ], + "lines": [ + { + "bbox": [ + 175, + 249, + 290, + 319 + ], + "spans": [ + { + "bbox": [ + 175, + 249, + 290, + 319 + ], + "type": "image", + "image_path": "397a18df437bd5056f0bc169b0aeea0e25bdb1287fa1e6affaf38d1a3ef93475.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 199, + 323, + 268, + 331 + ], + "lines": [ + { + "bbox": [ + 199, + 323, + 268, + 331 + ], + "spans": [ + { + "bbox": [ + 199, + 323, + 268, + 331 + ], + "type": "text", + "content": "(b) Search Performance" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 58, + 357, + 287, + 445 + ], + "blocks": [ + { + "bbox": [ + 51, + 341, + 294, + 352 + ], + "lines": [ + { + "bbox": [ + 51, + 341, + 294, + 352 + ], + "spans": [ + { + "bbox": [ + 51, + 341, + 294, + 352 + ], + "type": "text", + "content": "Figure 4: Performance of GenSAR using different identifiers." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 58, + 357, + 287, + 445 + ], + "lines": [ + { + "bbox": [ + 58, + 357, + 287, + 445 + ], + "spans": [ + { + "bbox": [ + 58, + 357, + 287, + 445 + ], + "type": "image", + "image_path": "fb95df98856241a96ab0dff04372bd6259441931f9571d2e0c69523d6587464d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 449, + 267, + 459 + ], + "lines": [ + { + "bbox": [ + 77, + 449, + 267, + 459 + ], + "spans": [ + { + "bbox": [ + 77, + 449, + 267, + 459 + ], + "type": "text", + "content": "Figure 5: Collision rate of different identifiers." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 471, + 295, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 471, + 295, + 515 + ], + "spans": [ + { + "bbox": [ + 50, + 471, + 295, + 515 + ], + "type": "text", + "content": "removing these two tasks, as shown in Table 5 (w/o \"Desc2ID\" and w/o \"ID2Desc\"). It can be seen that removing these tasks leads to a decrease in both S&R performance, indicating the effectiveness of these tasks in helping the LLM better understand item identifiers." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 525, + 192, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 525, + 192, + 537 + ], + "spans": [ + { + "bbox": [ + 51, + 525, + 192, + 537 + ], + "type": "text", + "content": "4.4 Experimental Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 539, + 294, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 539, + 294, + 561 + ], + "spans": [ + { + "bbox": [ + 50, + 539, + 294, + 561 + ], + "type": "text", + "content": "We conducted further experiments on the Commercial dataset to analyze the effectiveness of different modules in GenSAR." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 567, + 295, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 567, + 295, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 567, + 295, + 708 + ], + "type": "text", + "content": "4.4.1 Impact of Different Identifier. To balance the semantic information needed for search and the collaborative information needed for recommendation, we designed the joint S&R identifier in Section 3.2. To validate its effectiveness, we compared it with identifiers learned directly from semantic embeddings or collaborative embeddings using RQ-VAE [26, 56], as shown in Figure 4. \"Only Collaborative\" represents using only collaborative embeddings, while \"Only Semantic\" represents using only semantic embeddings. The results show that identifiers derived solely from semantic or collaborative information lead to degraded performance. Furthermore, using only collaborative information results in worse search performance, which aligns with the fact that search relies more on semantic information." + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 319, + 87, + 433, + 156 + ], + "blocks": [ + { + "bbox": [ + 319, + 87, + 433, + 156 + ], + "lines": [ + { + "bbox": [ + 319, + 87, + 433, + 156 + ], + "spans": [ + { + "bbox": [ + 319, + 87, + 433, + 156 + ], + "type": "image", + "image_path": "7eaf0981cb5f20384989aa56e36ad232812d82cdb50bb0e90f7e15e1c57b40d9.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 327, + 160, + 427, + 169 + ], + "lines": [ + { + "bbox": [ + 327, + 160, + 427, + 169 + ], + "spans": [ + { + "bbox": [ + 327, + 160, + 427, + 169 + ], + "type": "text", + "content": "(a) Recommendation Performance" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 440, + 87, + 555, + 156 + ], + "blocks": [ + { + "bbox": [ + 440, + 87, + 555, + 156 + ], + "lines": [ + { + "bbox": [ + 440, + 87, + 555, + 156 + ], + "spans": [ + { + "bbox": [ + 440, + 87, + 555, + 156 + ], + "type": "image", + "image_path": "6a62487787c0bd42a8e02f0875370a16e73635f9a6a3a3a06be3b844c3d351b4.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 463, + 160, + 533, + 169 + ], + "lines": [ + { + "bbox": [ + 463, + 160, + 533, + 169 + ], + "spans": [ + { + "bbox": [ + 463, + 160, + 533, + 169 + ], + "type": "text", + "content": "(b) Search Performance" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 314, + 178, + 558, + 210 + ], + "lines": [ + { + "bbox": [ + 314, + 178, + 558, + 210 + ], + "spans": [ + { + "bbox": [ + 314, + 178, + 558, + 210 + ], + "type": "text", + "content": "Figure 6: Performance under different numbers of shared codebooks " 
+ }, + { + "bbox": [ + 314, + 178, + 558, + 210 + ], + "type": "inline_equation", + "content": "L_{m}" + }, + { + "bbox": [ + 314, + 178, + 558, + 210 + ], + "type": "text", + "content": ". We fix " + }, + { + "bbox": [ + 314, + 178, + 558, + 210 + ], + "type": "inline_equation", + "content": "L_{m} + L_{n} = 4" + }, + { + "bbox": [ + 314, + 178, + 558, + 210 + ], + "type": "text", + "content": " and vary " + }, + { + "bbox": [ + 314, + 178, + 558, + 210 + ], + "type": "inline_equation", + "content": "L_{m}" + }, + { + "bbox": [ + 314, + 178, + 558, + 210 + ], + "type": "text", + "content": " to observe the results." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 229, + 559, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 229, + 559, + 269 + ], + "spans": [ + { + "bbox": [ + 313, + 229, + 559, + 269 + ], + "type": "text", + "content": "4.4.2 Collision Rate of Different Identifier. Additionally, we analyzed the advantages of different identifiers from the perspective of collision rate. The formula for calculating the collision rate is as follows:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 368, + 270, + 504, + 292 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 368, + 270, + 504, + 292 + ], + "spans": [ + { + "bbox": [ + 368, + 270, + 504, + 292 + ], + "type": "interline_equation", + "content": "\\text {C o l l i s i o n R a t e} = 1 - \\frac {\\# \\text {U n i q u e I d e n s i t e r}}{\\# \\text {U n i q u e I t e m}},", + "image_path": "829817898d5e6fcd9cc0d3598c229b4c1dda91385eafc9f3cb585061267148b0.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "spans": [ + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "type": "text", + "content": "where # Unique Identifier represents the number of unique identifiers, and # Unique Item represents the number of unique items. Since RQ-VAE does not guarantee a unique identifier for each item during the learning process, collisions may occur where different items share the same identifier [26, 56]. A higher collision rate can negatively impact the model's performance. From Figure 5, it can be observed that the two identifiers assigned to each item in GenSAR, incorporating both semantic and collaborative information, have a lower collision rate of " + }, + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "type": "inline_equation", + "content": "0.18\\%" + }, + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "type": "inline_equation", + "content": "0.39\\%" + }, + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "type": "text", + "content": ", respectively. In contrast, identifiers derived solely from semantic embeddings or collaborative embeddings exhibit higher collision rates of " + }, + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "type": "inline_equation", + "content": "1.37\\%" + }, + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "type": "inline_equation", + "content": "0.90\\%" + }, + { + "bbox": [ + 313, + 296, + 559, + 450 + ], + "type": "text", + "content": ", respectively. 
This further validates the advantage of the identifiers in GenSAR, as their lower collision rate enables the model to achieve better performance." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "spans": [ + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "text", + "content": "4.4.3 Impact of Hyper-parameters. As described in Section 3.2, we have " + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "inline_equation", + "content": "L_{m}" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "text", + "content": "-level shared codebooks and " + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "inline_equation", + "content": "L_{n}" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "text", + "content": "-level specific codebooks. Here, we analyze the impact of the number of shared and specific codebooks (" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "inline_equation", + "content": "L_{m}" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "inline_equation", + "content": "L_{n}" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "text", + "content": ") on the results, as shown in Figure 6. We fix " + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "inline_equation", + "content": "L_{m} + L_{n} = 4" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "text", + "content": " and observe the results. It can be seen that having too few (" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "inline_equation", + "content": "L_{m} = 1" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "text", + "content": ") or too many (" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "inline_equation", + "content": "L_{m} = 3" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "text", + "content": ") shared codebooks fails to achieve strong performance in both S&R. This indicates that " + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "inline_equation", + "content": "L_{m}" + }, + { + "bbox": [ + 313, + 457, + 558, + 578 + ], + "type": "text", + "content": " needs to be properly set so that the identifier can capture both the shared information between semantics and collaboration as well as their specific characteristics. Only in this way can we achieve better performance in both S&R." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "type": "text", + "content": "Additionally, we analyzed the impact of identifier length on performance, as shown in Figure 7. We fix " + }, + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "type": "inline_equation", + "content": "L_{m} = 2" + }, + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "type": "text", + "content": " and vary " + }, + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "type": "inline_equation", + "content": "L_{n}" + }, + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "type": "text", + "content": " to adjust the identifier length and observe the results. 
It can be seen that both shorter " + }, + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "type": "inline_equation", + "content": "(L_{m} + L_{n} = 3)" + }, + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "type": "text", + "content": " and longer " + }, + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "type": "inline_equation", + "content": "(L_{m} + L_{n} = 5)" + }, + { + "bbox": [ + 313, + 578, + 559, + 710 + ], + "type": "text", + "content": " identifiers lead to performance degradation. This is because, when the identifier is too short, the identifiers learned through RQ-VAE are more prone to collisions, resulting in a higher collision rate and making it difficult for the model to distinguish between different items. On the other hand, when the identifier is too long, the model requires more decoding steps during item generation, leading to accumulated errors and ultimately deteriorating performance. Therefore, it is essential to properly set the identifier length to achieve better performance." + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 223, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 223, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 223, + 68 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "type": "text", + "content": "Teng Shi et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 86, + 170, + 156 + ], + "blocks": [ + { + "bbox": [ + 55, + 86, + 170, + 156 + ], + "lines": [ + { + "bbox": [ + 55, + 86, + 170, + 156 + ], + "spans": [ + { + "bbox": [ + 55, + 86, + 170, + 156 + ], + "type": "image", + "image_path": "15322b8796533cd47d49b9fc9b63e562b17b827521e8c4717054345e22078795.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 63, + 160, + 163, + 169 + ], + "lines": [ + { + "bbox": [ + 63, + 160, + 163, + 169 + ], + "spans": [ + { + "bbox": [ + 63, + 160, + 163, + 169 + ], + "type": "text", + "content": "(a) Recommendation Performance" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 175, + 86, + 291, + 156 + ], + "blocks": [ + { + "bbox": [ + 175, + 86, + 291, + 156 + ], + "lines": [ + { + "bbox": [ + 175, + 86, + 291, + 156 + ], + "spans": [ + { + "bbox": [ + 175, + 86, + 291, + 156 + ], + "type": "image", + "image_path": "6c8e992b55df077b506a54aa95974fa63b68fb5fc6708d66c678ddb49d9a9381.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 198, + 160, + 268, + 169 + ], + "lines": [ + { + "bbox": [ + 198, + 160, + 268, + 169 + ], + "spans": [ + { + "bbox": [ + 198, + 160, + 268, + 169 + ], + "type": "text", + "content": "(b) Search Performance" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 50, + 178, + 296, + 200 + ], + "lines": [ + { + "bbox": [ + 50, + 178, + 296, + 200 + ], + "spans": [ + { + "bbox": [ + 50, + 178, + 296, + 200 + ], + "type": "text", + "content": "Figure 7: Performance under different length of the identifier. 
We fix " + }, + { + "bbox": [ + 50, + 178, + 296, + 200 + ], + "type": "inline_equation", + "content": "L_{m} = 2" + }, + { + "bbox": [ + 50, + 178, + 296, + 200 + ], + "type": "text", + "content": " and vary " + }, + { + "bbox": [ + 50, + 178, + 296, + 200 + ], + "type": "inline_equation", + "content": "L_{n}" + }, + { + "bbox": [ + 50, + 178, + 296, + 200 + ], + "type": "text", + "content": " to adjust the identifier length." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 216, + 127, + 226 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 216, + 127, + 226 + ], + "spans": [ + { + "bbox": [ + 51, + 216, + 127, + 226 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 230, + 296, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 230, + 296, + 373 + ], + "spans": [ + { + "bbox": [ + 50, + 230, + 296, + 373 + ], + "type": "text", + "content": "In this paper, we propose GenSAR, which unifies balanced search and recommendation through generative retrieval to alleviate the trade-off between the two tasks and improve their performance. To balance the semantic information required for search and the collaborative information needed for recommendation, we design the joint S&R identifier and different training tasks. First, we learn two identifiers for each item to represent semantic and collaborative information, respectively. These identifiers share a common part to capture the information shared between semantics and collaboration while retaining distinct parts to preserve specific information. Second, we design different training tasks to help the model better understand the requirements of S&R tasks. We also validate the effectiveness of GenSAR through extensive experiments." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 384, + 108, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 384, + 108, + 395 + ], + "spans": [ + { + "bbox": [ + 52, + 384, + 108, + 395 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 397, + 295, + 709 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 55, + 397, + 295, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 397, + 295, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 397, + 295, + 422 + ], + "type": "text", + "content": "[1] Wasi Uddin Ahmad, Kai-Wei Chang, and Hongning Wang. 2018. Multi-task learning for document ranking and query suggestion. In International conference on learning representations." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 422, + 295, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 422, + 295, + 445 + ], + "spans": [ + { + "bbox": [ + 55, + 422, + 295, + 445 + ], + "type": "text", + "content": "[2] Qingyao Ai, Daniel N Hill, SVN Vishwanathan, and W Bruce Croft. 2019. A zero attention model for personalized product search. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management. 379-388." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 445, + 294, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 445, + 294, + 477 + ], + "spans": [ + { + "bbox": [ + 56, + 445, + 294, + 477 + ], + "type": "text", + "content": "[3] Qingyao Ai, Yongfeng Zhang, Keping Bi, Xu Chen, and W Bruce Croft. 2017. Learning a hierarchical embedding model for personalized product search. In Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval. 645-654." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 477, + 294, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 477, + 294, + 509 + ], + "spans": [ + { + "bbox": [ + 56, + 477, + 294, + 509 + ], + "type": "text", + "content": "[4] Jinheon Baek, Nirupama Chandrasekaran, Silviu Cucerzan, Allen Herring, and Sujay Kumar Jauhar. 2024. Knowledge-augmented large language models for personalized contextual query suggestion. In Proceedings of the ACM on Web Conference 2024. 3355-3366." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 509, + 294, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 509, + 294, + 540 + ], + "spans": [ + { + "bbox": [ + 56, + 509, + 294, + 540 + ], + "type": "text", + "content": "[5] Michele Bevilacqua, Giuseppe Ottaviano, Patrick Lewis, Scott Yih, Sebastian Riedel, and Fabio Petroni. 2022. Autoregressive search engines: Generating substrings as document identifiers. Advances in Neural Information Processing Systems 35 (2022), 31668-31683." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 540, + 294, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 540, + 294, + 572 + ], + "spans": [ + { + "bbox": [ + 56, + 540, + 294, + 572 + ], + "type": "text", + "content": "[6] Keping Bi, Qingyao Ai, and W Bruce Croft. 2020. A transformer-based embedding model for personalized product search. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 1521-1524." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 572, + 294, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 572, + 294, + 612 + ], + "spans": [ + { + "bbox": [ + 56, + 572, + 294, + 612 + ], + "type": "text", + "content": "[7] Shitong Dai, Jiongnan Liu, Zhicheng Dou, Haonan Wang, Lin Liu, Bo Long, and Ji-Rong Wen. 2023. Contrastive Learning for User Sequence Representation in Personalized Product Search. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, KDD 2023, Long Beach, CA, USA, August 6-10, 2023. ACM, 380-389." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 612, + 294, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 612, + 294, + 644 + ], + "spans": [ + { + "bbox": [ + 56, + 612, + 294, + 644 + ], + "type": "text", + "content": "[8] Sunhao Dai, Ninglu Shao, Haiyuan Zhao, Weijie Yu, Zihua Si, Chen Xu, Zhongxiang Sun, Xiao Zhang, and Jun Xu. 2023. Uncovering chatgpt's capabilities in recommender systems. In Proceedings of the 17th ACM Conference on Recommender Systems. 1126-1132." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 644, + 294, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 644, + 294, + 669 + ], + "spans": [ + { + "bbox": [ + 56, + 644, + 294, + 669 + ], + "type": "text", + "content": "[9] Chenlong Deng, Yujia Zhou, and Zhicheng Dou. 2022. Improving personalized search with dual-feedback network. In Proceedings of the fifteenth ACM international conference on web search and data mining, 210-218." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 669, + 294, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 669, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 53, + 669, + 294, + 709 + ], + "type": "text", + "content": "[10] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 86, + 559, + 708 + ], + "type": "list", + "angle": 0, + "index": 41, + "blocks": [ + { + "bbox": [ + 317, + 86, + 559, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 86, + 559, + 118 + ], + "spans": [ + { + "bbox": [ + 317, + 86, + 559, + 118 + ], + "type": "text", + "content": "[11] Shijie Geng, Shuchang Liu, Zuohui Fu, Yingqiang Ge, and Yongfeng Zhang. 2022. Recommendation as language processing (rlp): A unified pretrain, personalized prompt & predict paradigm (p5). In Proceedings of the 16th ACM Conference on Recommender Systems. 299-315." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 118, + 559, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 118, + 559, + 158 + ], + "spans": [ + { + "bbox": [ + 317, + 118, + 559, + 158 + ], + "type": "text", + "content": "[12] Yulong Gu, Wentian Bao, Dan Ou, Xiang Li, Baoliang Cui, Biyu Ma, Haikuan Huang, Qingwen Liu, and Xiaoyi Zeng. 2021. Self-supervised learning on users' spontaneous behaviors for multi-scenario ranking in e-commerce. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 3828-3837." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 158, + 559, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 158, + 559, + 182 + ], + "spans": [ + { + "bbox": [ + 317, + 158, + 559, + 182 + ], + "type": "text", + "content": "[13] Ruining He and Julian McAuley. 2016. Ups and downs: Modeling the visual evolution of fashion trends with one-class collaborative filtering. In proceedings of the 25th international conference on world wide web. 507-517." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 182, + 559, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 182, + 559, + 206 + ], + "spans": [ + { + "bbox": [ + 317, + 182, + 559, + 206 + ], + "type": "text", + "content": "[14] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173-182." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 206, + 559, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 206, + 559, + 246 + ], + "spans": [ + { + "bbox": [ + 317, + 206, + 559, + 246 + ], + "type": "text", + "content": "[15] Zhankui He, Handong Zhao, Zhaowen Wang, Zhe Lin, Ajinkya Kale, and Julian McAuley. 2022. Query-Aware Sequential Recommendation. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management (Atlanta, GA, USA) (CIKM '22). Association for Computing Machinery, New York, NY, USA, 4019-4023." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 246, + 559, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 246, + 559, + 285 + ], + "spans": [ + { + "bbox": [ + 317, + 246, + 559, + 285 + ], + "type": "text", + "content": "[16] Balázs Hidasi, Alexandros Karatzoglou, Linas Baltrunas, and Domonkos Tikk. 2016. Session-based Recommendations with Recurrent Neural Networks. In 4th International Conference on Learning Representations, ICLR 2016, San Juan, Puerto Rico, May 2-4, 2016, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 285, + 559, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 285, + 559, + 318 + ], + "spans": [ + { + "bbox": [ + 317, + 285, + 559, + 318 + ], + "type": "text", + "content": "[17] Wenyue Hua, Shuyuan Xu, Yingqiang Ge, and Yongfeng Zhang. 2023. How to index item ids for recommendation foundation models. In Proceedings of the Annual International ACM SIGIR Conference on Research and Development in Information Retrieval in the Asia Pacific Region. 195-204." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 318, + 559, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 318, + 559, + 350 + ], + "spans": [ + { + "bbox": [ + 317, + 318, + 559, + 350 + ], + "type": "text", + "content": "[18] Gautier Izacard, Mathilde Caron, Lucas Hosseini, Sebastian Riedel, Piotr Bojanowski, Armand Joulin, and Edouard Grave. 2021. Unsupervised dense information retrieval with contrastive learning. arXiv preprint arXiv:2112.09118 (2021)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 350, + 559, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 350, + 559, + 373 + ], + "spans": [ + { + "bbox": [ + 317, + 350, + 559, + 373 + ], + "type": "text", + "content": "[19] Wang-Cheng Kang and Julian McAuley. 2018. Self-attentive sequential recommendation. In 2018 IEEE International Conference on Data Mining (ICDM). IEEE, 197-206." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 373, + 559, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 373, + 559, + 398 + ], + "spans": [ + { + "bbox": [ + 317, + 373, + 559, + 398 + ], + "type": "text", + "content": "[20] Doyup Lee, Chiheon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. 2022. Autoregressive image generation using residual quantization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 11523-11532."
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 398, + 559, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 398, + 559, + 422 + ], + "spans": [ + { + "bbox": [ + 317, + 398, + 559, + 422 + ], + "type": "text", + "content": "[21] Xiaoxi Li, Jiajie Jin, Yujia Zhou, Yuyao Zhang, Peitian Zhang, Yutao Zhu, and Zhicheng Dou. 2024. From matching to generation: A survey on generative information retrieval. arXiv preprint arXiv:2404.14851 (2024)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 422, + 559, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 422, + 559, + 453 + ], + "spans": [ + { + "bbox": [ + 317, + 422, + 559, + 453 + ], + "type": "text", + "content": "[22] Yongqi Li, Nan Yang, Liang Wang, Furu Wei, and Wenjie Li. 2023. Multiview Identifiers Enhanced Generative Retrieval. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 6636-6648." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 453, + 559, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 453, + 559, + 485 + ], + "spans": [ + { + "bbox": [ + 317, + 453, + 559, + 485 + ], + "type": "text", + "content": "[23] Jiayi Liao, Sihang Li, Zhengyi Yang, Jiancan Wu, Yancheng Yuan, Xiang Wang, and Xiangnan He. 2024. Llara: Large language-recommendation assistant. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1785-1795." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 485, + 559, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 485, + 559, + 517 + ], + "spans": [ + { + "bbox": [ + 317, + 485, + 559, + 517 + ], + "type": "text", + "content": "[24] Julian McAuley, Christopher Targett, Qinfeng Shi, and Anton Van Den Hengel. 2015. Image-based recommendations on styles and substitutes. In Proceedings of the 38th international ACM SIGIR conference on research and development in information retrieval. 43-52." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 317, + 517, + 559, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 517, + 559, + 548 + ], + "spans": [ + { + "bbox": [ + 317, + 517, + 559, + 548 + ], + "type": "text", + "content": "[25] Gustavo Penha, Ali Vardasbi, Enrico Palumbo, Marco De Nadai, and Hugues Bouchard. 2024. Bridging Search and Recommendation in Generative Retrieval: Does One Task Help the Other?. In Proceedings of the 18th ACM Conference on Recommender Systems. 340-349." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 548, + 559, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 548, + 559, + 581 + ], + "spans": [ + { + "bbox": [ + 317, + 548, + 559, + 581 + ], + "type": "text", + "content": "[26] Shashank Rajput, Nikhil Mehta, Anima Singh, Raghunandan Hulikal Keshavan, Trung Vu, Lukasz Heldt, Lichan Hong, Yi Tay, Vinh Tran, Jonah Samost, et al. 2023. Recommender systems with generative retrieval. Advances in Neural Information Processing Systems 36 (2023), 10299-10315." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 317, + 581, + 559, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 581, + 559, + 605 + ], + "spans": [ + { + "bbox": [ + 317, + 581, + 559, + 605 + ], + "type": "text", + "content": "[27] Stephen Robertson, Hugo Zaragoza, et al. 2009.
The probabilistic relevance framework: BM25 and beyond. Foundations and Trends® in Information Retrieval 3, 4 (2009), 333-389." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 605, + 559, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 605, + 559, + 628 + ], + "spans": [ + { + "bbox": [ + 317, + 605, + 559, + 628 + ], + "type": "text", + "content": "[28] Chenglei Shen, Xiao Zhang, Teng Shi, Changshuo Zhang, Guofu Xie, and Jun Xu. 2024. A survey of controllable learning: Methods and applications in information retrieval. arXiv preprint arXiv:2407.06083 (2024)." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 317, + 628, + 559, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 628, + 559, + 668 + ], + "spans": [ + { + "bbox": [ + 317, + 628, + 559, + 668 + ], + "type": "text", + "content": "[29] Teng Shi, Zihua Si, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Dewei Leng, Yanan Niu, and Yang Song. 2024. UniSAR: Modeling User Transition Behaviors between Search and Recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1029-1039." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 317, + 668, + 559, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 668, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 317, + 668, + 559, + 708 + ], + "type": "text", + "content": "[30] Zihua Si, Xueran Han, Xiao Zhang, Jun Xu, Yue Yin, Yang Song, and Ji-Rong Wen. 2022. A Model-Agnostic Causal Learning Framework for Recommendation Using Search Data. In Proceedings of the ACM Web Conference 2022 (Virtual Event, Lyon, France) (WWW '22). Association for Computing Machinery, New York, NY, USA, 224-233." + } + ] + } + ], + "index": 40 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 195, + 68 + ], + "type": "text", + "content": "Unified Generative Search and Recommendation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 386, + 60, + 559, + 69 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 86, + 294, + 709 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 52, + 86, + 294, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 86, + 294, + 126 + ], + "spans": [ + { + "bbox": [ + 52, + 86, + 294, + 126 + ], + "type": "text", + "content": "[31] Zihua Si, Zhongxiang Sun, Xiao Zhang, Jun Xu, Xiaoxue Zang, Yang Song, Kun Gai, and Ji-Rong Wen. 2023. When search meets recommendation: Learning disentangled search representation for recommendation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1313-1323." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 126, + 294, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 126, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 52, + 126, + 294, + 167 + ], + "type": "text", + "content": "[32] Fei Sun, Jun Liu, Jian Wu, Changhua Pei, Xiao Lin, Wenwu Ou, and Peng Jiang. 2019. BERT4Rec: Sequential Recommendation with Bidirectional Encoder Representations from Transformer. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management (Beijing, China) (CIKM '19). ACM, New York, NY, USA, 1441-1450." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 167, + 294, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 167, + 294, + 198 + ], + "spans": [ + { + "bbox": [ + 52, + 167, + 294, + 198 + ], + "type": "text", + "content": "[33] Weiwei Sun, Lingyong Yan, Zheng Chen, Shuaiqiang Wang, Haichao Zhu, Pengjie Ren, Zhumin Chen, Dawei Yin, Maarten de Rijke, and Zhaochun Ren. 2024. Learning to tokenize for generative retrieval. Advances in Neural Information Processing Systems 36 (2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 198, + 294, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 198, + 294, + 231 + ], + "spans": [ + { + "bbox": [ + 52, + 198, + 294, + 231 + ], + "type": "text", + "content": "[34] Jiakai Tang, Sunhao Dai, Teng Shi, Jun Xu, Xu Chen, Wen Chen, Wu Jian, and Yuning Jiang. 2025. Think Before Recommend: Unleashing the Latent Reasoning Power for Sequential Recommendation. arXiv:2503.22675 [cs.IR] https://arxiv.org/abs/2503.22675" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 231, + 294, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 231, + 294, + 262 + ], + "spans": [ + { + "bbox": [ + 52, + 231, + 294, + 262 + ], + "type": "text", + "content": "[35] Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. 2022. Transformer memory as a differentiable search index. Advances in Neural Information Processing Systems 35 (2022), 21831-21843." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 262, + 294, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 262, + 294, + 286 + ], + "spans": [ + { + "bbox": [ + 52, + 262, + 294, + 286 + ], + "type": "text", + "content": "[36] Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, and Furu Wei. 2024. Multilingual e5 text embeddings: A technical report. arXiv preprint arXiv:2402.05672 (2024)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 286, + 294, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 286, + 294, + 326 + ], + "spans": [ + { + "bbox": [ + 52, + 286, + 294, + 326 + ], + "type": "text", + "content": "[37] Yuening Wang, Man Chen, Yaochen Hu, Wei Guo, Yingxue Zhang, Hufeng Guo, Yong Liu, and Mark Coates. 2024. Enhancing Click-through Rate Prediction in Recommendation Domain with Search Query Representation. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 2462-2471."
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 326, + 294, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 326, + 294, + 358 + ], + "spans": [ + { + "bbox": [ + 52, + 326, + 294, + 358 + ], + "type": "text", + "content": "[38] Yujing Wang, Yingyan Hou, Haonan Wang, Ziming Miao, Shibin Wu, Qi Chen, Yuqing Xia, Chengmin Chi, Guoshuai Zhao, Zheng Liu, et al. 2022. A neural corpus indexer for document retrieval. Advances in Neural Information Processing Systems 35 (2022), 25600-25614." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 358, + 294, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 358, + 294, + 397 + ], + "spans": [ + { + "bbox": [ + 52, + 358, + 294, + 397 + ], + "type": "text", + "content": "[39] Yu Wang, Zhengyang Wang, Hengrui Zhang, Qingyu Yin, Xianfeng Tang, Yinghan Wang, Danqing Zhang, Limeng Cui, Monica Cheng, Bing Yin, et al. 2023. Exploiting intent evolution in e-commercial query recommendation. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 5162-5173." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 398, + 294, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 398, + 294, + 430 + ], + "spans": [ + { + "bbox": [ + 52, + 398, + 294, + 430 + ], + "type": "text", + "content": "[40] Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2024. C-pack: Packed resources for general chinese embeddings. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 641-649." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 430, + 294, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 430, + 294, + 453 + ], + "spans": [ + { + "bbox": [ + 52, + 430, + 294, + 453 + ], + "type": "text", + "content": "[41] Jiayi Xie, Shang Liu, Gao Cong, and Zhenzhong Chen. 2024. UnifiedSSR: A Unified Framework of Sequential Search and Recommendation. In Proceedings of the ACM on Web Conference 2024. 3410-3419." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 453, + 294, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 453, + 294, + 485 + ], + "spans": [ + { + "bbox": [ + 52, + 453, + 294, + 485 + ], + "type": "text", + "content": "[42] Lee Xiong, Chenyan Xiong, Ye Li, Kwok-Fung Tang, Jialin Liu, Paul Bennett, Junaid Ahmed, and Arnold Overwijk. 2020. Approximate nearest neighbor negative contrastive learning for dense text retrieval. arXiv preprint arXiv:2007.00808 (2020)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 485, + 294, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 485, + 294, + 533 + ], + "spans": [ + { + "bbox": [ + 52, + 485, + 294, + 533 + ], + "type": "text", + "content": "[43] Jing Yao, Zhicheng Dou, Ruobing Xie, Yanxiong Lu, Zhiping Wang, and Ji-Rong Wen. 2021. USER: A Unified Information Search and Recommendation Model Based on Integrated Behavior Sequence. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management (Virtual Event, Queensland, Australia) (CIKM '21). Association for Computing Machinery, New York, NY, USA, 2373-2382."
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 533, + 294, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 533, + 294, + 572 + ], + "spans": [ + { + "bbox": [ + 52, + 533, + 294, + 572 + ], + "type": "text", + "content": "[44] Zheng Yuan, Fajie Yuan, Yu Song, Youhua Li, Junchen Fu, Fei Yang, Yunzhu Pan, and Yongxin Ni. 2023. Where to go next for recommender systems? id-vs. modality-based recommender models revisited. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2639-2649." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 572, + 294, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 572, + 294, + 604 + ], + "spans": [ + { + "bbox": [ + 52, + 572, + 294, + 604 + ], + "type": "text", + "content": "[45] Zhenrui Yue, Yueqi Wang, Zhankui He, Huimin Zeng, Julian McAuley, and Dong Wang. 2024. Linear recurrent units for sequential recommendation. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining. 930-938." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 604, + 294, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 604, + 294, + 637 + ], + "spans": [ + { + "bbox": [ + 52, + 604, + 294, + 637 + ], + "type": "text", + "content": "[46] Hamed Zamani and W. Bruce Croft. 2018. Joint Modeling and Optimization of Search and Recommendation. In Proceedings of the First Biennial Conference on Design of Experimental Search & Information Retrieval Systems, Bertinoro, Italy, August 28-31, 2018 (CEUR Workshop Proceedings, Vol. 2167). CEUR-WS.org, 36-41." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 52, + 637, + 294, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 637, + 294, + 669 + ], + "spans": [ + { + "bbox": [ + 52, + 637, + 294, + 669 + ], + "type": "text", + "content": "[47] Hamed Zamani and W. Bruce Croft. 2020. Learning a Joint Search and Recommendation Model from User-Item Interactions. In Proceedings of the 13th International Conference on Web Search and Data Mining (Houston, TX, USA) (WSDM '20). Association for Computing Machinery, New York, NY, USA, 717–725." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 52, + 669, + 294, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 669, + 294, + 693 + ], + "spans": [ + { + "bbox": [ + 52, + 669, + 294, + 693 + ], + "type": "text", + "content": "[48] Changshuo Zhang, Teng Shi, Xiao Zhang, Qi Liu, Ruobing Xie, Jun Xu, and Ji-Rong Wen. 2024. Modeling Domain and Feedback Transitions for Cross-Domain Sequential Recommendation. arXiv preprint arXiv:2408.08209 (2024)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 52, + 693, + 294, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 294, + 709 + ], + "type": "text", + "content": "[49] Changshuo Zhang, Teng Shi, Xiao Zhang, Yanping Zheng, Ruobing Xie, Qi Liu, Jun Xu, and Ji-Rong Wen. 2024. 
QAGCF: Graph Collaborative Filtering for Q&A" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 87, + 559, + 398 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 331, + 87, + 495, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 87, + 495, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 87, + 495, + 95 + ], + "type": "text", + "content": "Recommendation. arXiv preprint arXiv:2406.04828 (2024)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 95, + 559, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 95, + 559, + 118 + ], + "spans": [ + { + "bbox": [ + 317, + 95, + 559, + 118 + ], + "type": "text", + "content": "[50] Changshuo Zhang, Xiao Zhang, Teng Shi, Jun Xu, and Ji-Rong Wen. 2025. Test-Time Alignment for Tracking User Interest Shifts in Sequential Recommendation. arXiv:2504.01489 [cs.IR] https://arxiv.org/abs/2504.01489" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 118, + 559, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 118, + 559, + 150 + ], + "spans": [ + { + "bbox": [ + 317, + 118, + 559, + 150 + ], + "type": "text", + "content": "[51] Kepu Zhang, Teng Shi, Sunhao Dai, Xiao Zhang, Yinfeng Li, Jing Lu, Xiaoxue Zang, Yang Song, and Jun Xu. 2024. SAQRec: Aligning Recommender Systems to User Satisfaction via Questionnaire Feedback. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3165-3175." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 150, + 559, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 150, + 559, + 175 + ], + "spans": [ + { + "bbox": [ + 317, + 150, + 559, + 175 + ], + "type": "text", + "content": "[52] Xiao Zhang, Teng Shi, Jun Xu, Zhenhua Dong, and Ji-Rong Wen. 2024. Model-Agnostic Causal Embedding Learning for Counterfactually Group-Fair Recommendation. IEEE Transactions on Knowledge and Data Engineering (2024)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 175, + 559, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 175, + 559, + 214 + ], + "spans": [ + { + "bbox": [ + 317, + 175, + 559, + 214 + ], + "type": "text", + "content": "[53] Yuting Zhang, Yiqing Wu, Ruidong Han, Ying Sun, Yongchun Zhu, Xiang Li, Wei Lin, Fuzhen Zhuang, Zhulin An, and Yongjun Xu. 2024. Unified Dual-Intent Translation for Joint Modeling of Search and Recommendation. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6291-6300." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 214, + 559, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 214, + 559, + 255 + ], + "spans": [ + { + "bbox": [ + 317, + 214, + 559, + 255 + ], + "type": "text", + "content": "[54] Kai Zhao, Yukun Zheng, Tao Zhuang, Xiang Li, and Xiaoyi Zeng. 2022. Joint Learning of E-Commerce Search and Recommendation with a Unified Graph Neural Network. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining (Virtual Event, AZ, USA) (WSDM '22). Association for Computing Machinery, New York, NY, USA, 1461–1469." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 255, + 559, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 255, + 559, + 278 + ], + "spans": [ + { + "bbox": [ + 317, + 255, + 559, + 278 + ], + "type": "text", + "content": "[55] Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 278, + 559, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 278, + 559, + 311 + ], + "spans": [ + { + "bbox": [ + 317, + 278, + 559, + 311 + ], + "type": "text", + "content": "[56] Bowen Zheng, Yupeng Hou, Hongyu Lu, Yu Chen, Wayne Xin Zhao, Ming Chen, and Ji-Rong Wen. 2024. Adapting large language models by integrating collaborative semantics for recommendation. In 2024 IEEE 40th International Conference on Data Engineering (ICDE). IEEE, 1435-1448." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 311, + 559, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 311, + 559, + 342 + ], + "spans": [ + { + "bbox": [ + 317, + 311, + 559, + 342 + ], + "type": "text", + "content": "[57] Kun Zhou, Hui Yu, Wayne Xin Zhao, and Ji-Rong Wen. 2022. Filter-Enhanced MLP is All You Need for Sequential Recommendation. In Proceedings of the ACM Web Conference 2022 (Virtual Event, Lyon, France) (WWW'22). Association for Computing Machinery, New York, NY, USA, 2388-2399." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 342, + 559, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 342, + 559, + 365 + ], + "spans": [ + { + "bbox": [ + 317, + 342, + 559, + 365 + ], + "type": "text", + "content": "[58] Yujia Zhou, Jing Yao, Ledell Wu, Zhicheng Dou, and Ji-Rong Wen. 2023. WebUltron: An Ultimate Retriever on Webpages Under the Model-Centric Paradigm. IEEE Transactions on Knowledge and Data Engineering (2023)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 365, + 559, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 365, + 559, + 398 + ], + "spans": [ + { + "bbox": [ + 317, + 365, + 559, + 398 + ], + "type": "text", + "content": "[59] Shengyao Zhuang, Houxing Ren, Linjun Shou, Jian Pei, Ming Gong, Guido Zuccon, and Daxin Jiang. 2022. Bridging the gap between indexing and retrieval for differentiable search index with query generation. arXiv preprint arXiv:2206.10128 (2022)." + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 224, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 224, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 224, + 68 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "type": "text", + "content": "Teng Shi et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_content_list.json b/data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..72b81e396eea4b4f3c2e5f15ee22e62193fda298 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_content_list.json @@ -0,0 +1,2331 @@ +[ + { + "type": "text", + "text": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation", + "text_level": 1, + "bbox": [ + 84, + 99, + 913, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Teng Shi", + "bbox": [ + 259, + 157, + 334, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Renmin University of China", + "bbox": [ + 202, + 175, + 393, + 189 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 250, + 190, + 346, + 204 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "shiteng@ruc.edu.cn", + "bbox": [ + 230, + 205, + 364, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaoxue Zang", + "bbox": [ + 238, + 248, + 354, + 265 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kai Zheng", + "bbox": [ + 254, + 266, + 339, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kuaishou Technology Co., Ltd.", + "bbox": [ + 194, + 282, + 401, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 248, + 297, + 344, + 311 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "xxic666@126.com", + "bbox": [ + 236, + 313, + 357, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "zhengk92@gmail.com", + "bbox": [ + 222, + 328, + 372, + 343 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 83, + 352, + 156, + 366 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, the personalization of Large Language Models (LLMs) to generate content that aligns with individual user preferences has garnered widespread attention. Personalized Retrieval-Augmented Generation (RAG), which retrieves relevant documents from the user's history to reflect their preferences and enhance LLM generation, is one commonly used approach for personalization. However, existing personalized RAG methods do not consider that the histories of similar users can also assist in personalized generation for the current user, meaning that collaborative information between users can also benefit personalized generation. Inspired by the application of collaborative filtering in recommender systems, we propose a method called CFRAG, which adapts Collaborative Filtering to RAG for personalized text generation. However, this presents two challenges: (1) how to incorporate collaborative information without explicit user similarity labels? (2) how to retrieve documents that support personalized LLM generation? For Challenge 1, we use contrastive learning to train user embeddings to retrieve similar users and introduce collaborative information. For Challenge 2, we design a personalized retriever and reranker to retrieve the top- $k$ documents from these users' histories. We take into account the user's preference during retrieval and reranking. 
Then we leverage feedback from the LLM to fine-tune the personalized retriever and reranker, enabling them to retrieve documents that meet the personalized generation needs of the LLM. Experimental results on the Language Model Personalization (LaMP) benchmark", + "bbox": [ + 81, + 369, + 482, + 717 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Corresponding authors. Work partially done at Engineering Research Center of Next-Generation Intelligent Search and Recommendation, Ministry of Education. \nWork done when Teng Shi was the intern at Kuaishou.", + "bbox": [ + 81, + 731, + 482, + 763 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.", + "bbox": [ + 81, + 779, + 482, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SIGIR '25, Padua, Italy.", + "bbox": [ + 84, + 853, + 191, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM. ACM ISBN 979-8-4007-1592-1/25/07", + "bbox": [ + 84, + 864, + 472, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://doi.org/10.1145/XXXXXX.XXXXXXX", + "bbox": [ + 84, + 883, + 289, + 895 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jun Xu*", + "bbox": [ + 666, + 157, + 730, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiao Zhang", + "bbox": [ + 651, + 175, + 748, + 191 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Renmin University of China", + "bbox": [ + 606, + 191, + 795, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 651, + 207, + 750, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{junxu,zhangx89}@ruc.edu.cn", + "bbox": [ + 599, + 222, + 800, + 236 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yang Song", + "bbox": [ + 656, + 248, + 743, + 265 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Han Li", + "bbox": [ + 669, + 267, + 728, + 280 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kuaishou Technology Co., Ltd.", + "bbox": [ + 596, + 282, + 803, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 651, + 297, + 748, + 311 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ys@sonyis.me", + "bbox": [ + 651, + 313, + 748, + 327 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "lihan08@kuaishou.com", + "bbox": [ + 620, + 328, + 779, + 340 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "validate the effectiveness of CFRAG. 
Further analysis confirms the importance of incorporating collaborative information.", + "bbox": [ + 513, + 353, + 913, + 382 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS Concepts", + "text_level": 1, + "bbox": [ + 514, + 393, + 633, + 410 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Information systems $\\rightarrow$ Personalization; - Computing methodologies $\\rightarrow$ Natural language generation.", + "bbox": [ + 513, + 412, + 926, + 441 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords", + "text_level": 1, + "bbox": [ + 514, + 454, + 599, + 469 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large language model; Personalization; Retrieval augmented generation", + "bbox": [ + 513, + 472, + 913, + 500 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM Reference Format:", + "text_level": 1, + "bbox": [ + 514, + 508, + 661, + 518 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Teng Shi, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Yang Song, and Han Li. 2025. Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation. In Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '25), July 13-18, 2025, Padua, Italy. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/XXXXXX.XXXXXXX", + "bbox": [ + 513, + 521, + 913, + 595 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 514, + 614, + 650, + 628 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Personalizing Large Language Models (LLMs) [55] to generate personalized outputs tailored to individual user preferences has emerged as a significant and rapidly growing field [16, 23, 29, 31, 32, 36, 37, 57]. Personalized Retrieval-Augmented Generation (RAG) [8] has become a commonly used approach for personalizing LLMs [29, 31, 32, 57].", + "bbox": [ + 511, + 632, + 913, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The process of existing personalized RAG methods typically involves retrieving similar documents from the user's historical behaviors based on the user's input query, then concatenating these documents with the query as a prompt input to the LLM for generation. Although effective, this approach is limited to retrieving only the current user's history, neglecting collaborative information. Users with similar histories tend to be more alike, and the information from these similar users can also aid in personalizing generation for the current user. As shown in the example in Figure 1, the upper part illustrates the results of the existing RAG method, which retrieves documents from the current user's history. We can only infer from these results that \"She\" in the user's input refers to \"Hillary Clinton\". In contrast, the lower part demonstrates", + "bbox": [ + 511, + 715, + 913, + 896 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05731v1 [cs.IR] 8 Apr 2025", + "bbox": [ + 22, + 279, + 60, + 700 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c145d4b73d48d432adc24495509ff5b7dc143be4525fd4b84a31137fa6d78e46.jpg", + "image_caption": [ + "Figure 1: An example from the LaMP-4 dataset [32]. The task of LaMP-4 is to generate personalized news headlines based on user input. 
This example illustrates the benefit of collaborative information for LLM personalization: (a) The top shows results retrieved by the existing RAG method from the current user's history, where we can only infer that \"She\" in the user's input refers to \"Hillary Clinton\". (b) The bottom shows results retrieved by our method from similar users' histories, allowing us to infer further that \"his\" in the user's input refers to \"Donald Trump\" thus enabling the generation of a more accurate result." + ], + "image_footnote": [], + "bbox": [ + 106, + 103, + 883, + 348 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "our method, which retrieves documents from the history of similar users. In this case, we can further infer that \"his\" in the user's input refers to \"Donald Trump\", leading to a better generation result. From this example, we can see that incorporating collaborative information allows the retrieval of more diverse documents, helping the LLM generate results that better meet the user's needs.", + "bbox": [ + 81, + 431, + 482, + 513 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Inspired by the application of collaborative filtering in recommender systems [11, 40, 46], we propose to adapt collaborative information into RAG to personalize LLMs. However, adapting collaborative filtering to personalized RAG presents two challenges. Challenge 1: How to incorporate collaborative information. Without explicit labels indicating which users are similar, which users' information should be selected to help personalize generation for the current user? Challenge 2: How to retrieve documents that support personalized LLM generation, rather than relying on traditional semantic relevance? Pre-trained dense retrieval models [54] only retrieve based on the semantic relevance between the query and document. Directly using these models for retrieval may not necessarily result in content that allows the LLM to generate outputs that meet the user's needs [25, 35].", + "bbox": [ + 81, + 513, + 482, + 708 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the above challenges, this paper proposes a method named CFRAG which adapts Collaborative Filtering to personalized Retrieval Augmented Generation. Firstly, to address Challenge 1, since there are no explicit user similarity labels, we use contrastive learning [15, 44] to train user embeddings for retrieving similar users to introduce collaborative information. Specifically, we apply different data augmentation methods to the user's history to obtain different views, and then treat different views of the same user's history as positive samples for each other. Then we use contrastive learning on different views to train the user embeddings. Secondly, for Challenge 2, we designed a personalized retriever and reranker to retrieve the top- $k$ documents from the histories of the retrieved users. In both retrieval and reranking, in addition to the semantic", + "bbox": [ + 81, + 708, + 482, + 888 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "relevance between the query and documents, we also considered the user's preferences for different documents to enable personalized retrieval. Additionally, we further fine-tune the retriever and reranker based on the feedback from the LLM to ensure that the retrieved documents better support the personalized LLM generation. 
Finally, the top- $k$ documents are concatenated with the user's input query to form a prompt, which is then fed into the LLM for personalized generation.", + "bbox": [ + 511, + 431, + 913, + 542 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The major contributions of the paper are summarized as follows: We analyzed the necessity of introducing collaborative filtering into RAG for LLM personalization and identified the challenges: how to introduce collaborative information and how to retrieve documents that support personalized LLM generation.", + "bbox": [ + 513, + 542, + 913, + 612 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We proposed a method called CFRAG, which uses contrastive learning to train user embeddings for retrieving similar users and incorporating collaborative information. It leverages LLM feedback to train the personalized retriever and reranker, enabling them to retrieve documents that support personalized LLM generation.", + "- Experimental results on the Language Model Personalization (LaMP) [32] benchmark validate the effectiveness of CFRAG. The experimental analysis also demonstrates the importance of leveraging collaborative information." + ], + "bbox": [ + 513, + 611, + 913, + 736 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 514, + 762, + 660, + 776 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Personalization of LLMs. Large Language Models (LLMs) [55] have demonstrated remarkable capabilities in various fields, such as text generation [22], information retrieval [56], recommender systems [5, 41], and so on. However, since LLMs are typically designed to serve all tasks with a single model and are trained on broad, domain-agnostic data, they face challenges in adapting to the personalized needs of individual users [4, 32]. Therefore, LLM personalization has attracted widespread attention [16, 31, 57].", + "bbox": [ + 511, + 785, + 913, + 896 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy.", + "bbox": [ + 84, + 75, + 279, + 85 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Teng Shi et al.", + "bbox": [ + 841, + 75, + 911, + 87 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/34f15c2d425a0ba3e0b25f67abb59aecc0f185aae42a557093eae9d488fd5f26.jpg", + "image_caption": [ + "Figure 2: The architecture of CFRAG. From left to right: (a) User Retrieval retrieves similar users (Section 4.1); (b) Retriever retrieves the top- $k$ documents from each user's history (Section 4.2); (c) Reranker reranks the $m \\times k$ documents to get the final top- $k$ documents, which are then concatenated with the query and input into the LLM for personalized text generation (Section 4.3)." + ], + "image_footnote": [], + "bbox": [ + 109, + 104, + 880, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Existing works on LLM personalization mainly include the following types of methods: (1) Fine-tuning a personalized LLM for each user [36, 37, 42]; Tan et al. [37] fine-tuned the LLM using LoRA [12] to get personalized LoRA parameters for each user. (2) Aligning LLMs with user-specific preferences through Reinforcement Learning from Human Feedback (RLHF) [16, 23, 43]; Jang et al. [16] first trained different parameters for various objectives using RLHF, then merged these parameters based on users' personalized needs. 
(3) Incorporating user-specific context into the prompt [21, 27, 29, 31, 32, 57]. Richardson et al. [29] used instruction-tuned LLMs to summarize user history and then incorporated it into prompts for generation. Salemi et al. [31, 32] used RAG to retrieve relevant documents from user history based on the input query and incorporated them into the prompt.", + "bbox": [ + 81, + 404, + 483, + 598 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This paper further introduces collaborative filtering for personalization based on the RAG framework. Collaborative filtering has already been applied in fields such as recommender systems [33, 34, 38, 48-52] and has been proven effective. It assumes that users who have interacted with similar items share similar preferences, and recommending items from similar users to the current user can meet their needs. Some works [11, 46] learn the collaborative information between users and items through matrix factorization [19], while others [10, 40] further explore higher-order collaborative information between users and items using graph neural networks. The application of collaborative filtering in LLM personalization remains under-explored.", + "bbox": [ + 81, + 598, + 483, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Retrieval Augmented Generation. Retrieval Augmented Generation [7, 8] introduces external knowledge through document retrieval, alleviating issues such as LLM hallucinations [53], and enhancing LLMs' capabilities in knowledge-intensive tasks [17] such as open-domain question answering [14, 20]. Some works [3, 13] encode retrieved documents using separate encoders, and then fuse the results with the language model using cross-attention. A more common approach is to directly include the retrieved documents in the prompt of the LLM [2, 9, 20, 25, 35]. In recent years, this", + "bbox": [ + 81, + 767, + 483, + 893 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "in-context RAG framework has also been applied to LLM personalization, which is personalized by retrieving documents from the user's history [31, 32, 57]. This paper introduces collaborative filtering by retrieving similar users' histories for better personalization.", + "bbox": [ + 513, + 404, + 916, + 460 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Problem Formulation", + "text_level": 1, + "bbox": [ + 514, + 470, + 722, + 486 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Let $\\mathcal{U} = \\{u_1, u_2, \\ldots, u_M\\}$ denotes the set of all users, where $M$ is the number of users. Each user $u \\in \\mathcal{U}$ has a chronologically ordered history $\\mathcal{H}_u = [d_1, d_2, \\ldots, d_N]$ which includes all her historical documents, where $N$ is the number of documents in the history. The personalized text generation dataset is $\\mathcal{D} = \\{(u, q, y)_i\\}_{i=1}^{|D|}$ . For each instance, $q$ is the query input by the user $u$ to the LLM, and $y$ is the target output. 
Our goal is first to introduce collaborative information by retrieving the top- $m$ most similar users for user $u$ :", + "bbox": [ + 513, + 489, + 915, + 604 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {U} _ {\\text {r e t r i e v e d}} = \\left\\{u _ {1}, u _ {2}, \\dots , u _ {m} \\right\\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 624, + 609, + 803, + 625 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Then, we use a retriever to retrieve the top- $k$ documents from each of the $m$ users' histories, resulting in a total of $m \\times k$ documents.", + "bbox": [ + 513, + 631, + 913, + 657 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} _ {\\text {r e t r i e v e d}} = \\{d _ {i, j} | i \\in \\{1, \\dots , m \\}, j \\in \\{1, \\dots , k \\} \\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 568, + 665, + 857, + 680 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Finally, we use a reranker to rerank these $m \\times k$ documents and obtain the final top- $k$ documents:", + "bbox": [ + 513, + 685, + 913, + 713 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} _ {\\text {r e r a n k e d}} = \\left\\{d _ {i} | i \\in \\{1, \\dots , k \\} \\right\\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 719, + 808, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "These top- $k$ documents will be concatenated with the user's query $q$ as a prompt and input into the LLM, enabling it to generate a response that aligns with the target output $y$ .", + "bbox": [ + 513, + 739, + 911, + 781 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This paper primarily focuses on how to retrieve $\\mathcal{U}_{\\mathrm{retrieved}}$ to introduce collaborative information, and how to train the retriever and reranker so that they can effectively retrieve documents that support the personalized LLM generation.", + "bbox": [ + 513, + 781, + 913, + 838 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4 Our Approach", + "text_level": 1, + "bbox": [ + 514, + 849, + 663, + 864 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section introduces our method CFRAG. CFRAG's overall architecture is shown in Figure 2. As mentioned in Section 1, to address", + "bbox": [ + 513, + 867, + 915, + 896 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation", + "bbox": [ + 83, + 75, + 532, + 87 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy.", + "bbox": [ + 718, + 75, + 913, + 87 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Challenge 1, i.e., how to introduce collaborative information, we first train user embeddings using contrastive learning to retrieve the top- $m$ most similar users (see Section 4.1). For Challenge 2, which involves retrieving documents that support personalized LLM generation, we fine-tune the personalized retriever and reranker using LLM feedback. The retriever first retrieves the top- $k$ documents from the history of each of the $m$ users, resulting in $m \\times k$ documents (see Section 4.2). 
The reranker then reranks these documents to obtain the final top- $k$ documents as input for the LLM (see Section 4.3).", + "bbox": [ + 81, + 106, + 483, + 244 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1 User Retrieval", + "text_level": 1, + "bbox": [ + 83, + 258, + 243, + 272 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "First, we perform user retrieval to get the top- $m$ most similar users for user $u$ to introduce collaborative information. However, we do not have labels indicating which users are similar to each other. To address this, we employ a contrastive learning [15, 44] approach. We apply different data augmentation methods to the user history $\mathcal{H}_u$ to obtain different views of the user's history. We treat different views of the same user as positive samples and the histories of other users as negative samples, and then we use the InfoNCE [28] loss to train user embeddings for retrieval. Figure 3 illustrates the process of training user embeddings using contrastive learning.", + "bbox": [ + 81, + 277, + 483, + 417 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1.1 User Encoder. Specifically, we first use an embedding model (such as BERT [6], RoBERTa [26], BGE [45] etc.) $\mathbf{Emb}(\cdot)$ to encode each document in the user's history $\mathcal{H}_u$ to obtain $\mathbf{E}_u = [\mathbf{e}_1,\mathbf{e}_2,\dots ,\mathbf{e}_N]^{\intercal}\in \mathbb{R}^{N\times d}$ , where $\mathbf{e}_i = \mathbf{Emb}(d_i)$ and $d$ is the embedding dimension. To model the sequential relationships between different documents in the user's history, we introduce positional embedding $\mathbf{P}\in \mathbb{R}^{N\times d}$ . Afterward, the history $\mathcal{H}_u$ 's embedding becomes $\widehat{\mathbf{E}}_u = \mathbf{E}_u + \mathbf{P}$ . Then, we apply a transformer [39] as the user encoder to encode the user's history $\widehat{\mathbf{E}}_u$ and average the transformer's output to obtain the user's embedding:", + "bbox": [ + 81, + 424, + 483, + 566 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\mathbf {e} _ {u} = \operatorname {E n c o d e r} _ {u} (u) = \operatorname {M E A N} (\operatorname {T r m} (\widehat {\mathbf {E}} _ {u})) \in \mathbb {R} ^ {d}, \tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 145, + 574, + 480, + 592 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\mathrm{Encoder}_u(\cdot)\to \mathbb{R}^d$ denotes the user encoder, $\mathrm{Trm}(\cdot)$ denotes a transformer encoder. Next, we train the transformer encoder using contrastive learning.", + "bbox": [ + 81, + 598, + 480, + 641 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1.2 Data Augmentation. We generate different views of $\mathcal{H}_u$ using the following three data augmentation methods:", + "bbox": [ + 83, + 648, + 480, + 676 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Document Crop. We randomly select a continuous sub-sequence of length $L_{c} = \lfloor \eta_{c}N\rfloor$ from $\mathcal{H}_u$ , where $\eta_c$ is a hyper-parameter controlling the crop ratio.
The history after cropping is as follows:", + "bbox": [ + 83, + 676, + 483, + 719 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {H} _ {u} ^ {\\mathrm {c r o p}} = [ d _ {c}, d _ {c + 1}, \\dots , d _ {c + L _ {c} - 1} ].\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 724, + 379, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Document Mask. For the history $\\mathcal{H}_u$ , we randomly mask out $L_{m} = \\lfloor \\eta_{m}N\\rfloor$ documents $\\mathcal{I}_{\\mathrm{mask}} = \\{i_1,i_2,\\dots ,i_{L_m}\\}$ , where $\\mathcal{I}_{\\mathrm{mask}}$ is the set of indices corresponding to the masked documents and $\\eta_{m}$ is a hyper-parameter that controls the mask ratio. The masked documents are replaced with a special token [mask]. The history after masking is as follows:", + "bbox": [ + 81, + 750, + 483, + 834 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {H} _ {u} ^ {\\text {m a s k}} = \\left[ \\hat {d} _ {1}, \\hat {d} _ {2}, \\dots , \\hat {d} _ {N} \\right], \\\\ \\hat {d} _ {i} = \\left\\{ \\begin{array}{l l} d _ {i}, & i \\notin \\mathcal {I} _ {\\text {m a s k}}, \\\\ [ \\text {m a s k} ], & i \\in \\mathcal {I} _ {\\text {m a s k}}. \\end{array} \\right. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 183, + 840, + 375, + 898 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/625fdaef24bf247580e641a34943670ea15cb8f8da28784bc68a9e6a62dbe508.jpg", + "image_caption": [ + "Figure 3: Contrastive learning for user embedding training." + ], + "image_footnote": [], + "bbox": [ + 522, + 104, + 903, + 244 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Document Reorder. We randomly select a sub-sequence $[d_r, d_{r+1}, \\ldots, d_{r+L_r-1}]$ of length $L_r = \\lfloor \\eta_r N \\rfloor$ from $\\mathcal{H}_u$ , where $\\eta_r$ is a hyper-parameter controlling the reorder ratio, and then randomly shuffle the order of the documents within the sub-sequence to obtain $[\\hat{d}_r, \\hat{d}_{r+1}, \\ldots, \\hat{d}_{r+L_r-1}]$ . The history after reordering is as follows:", + "bbox": [ + 513, + 277, + 913, + 359 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {H} _ {u} ^ {\\text {r e o r d e r}} = \\left[ d _ {1}, d _ {2}, \\dots , \\hat {d} _ {r}, \\dots , \\hat {d} _ {r + L _ {r} - 1}, \\dots , d _ {N} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 570, + 364, + 854, + 383 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1.3 Contrastive Loss. Each time, we randomly select two data augmentation methods $\\mathcal{A}'$ and $\\mathcal{A}''$ to generate two different views of $\\mathcal{H}_u$ , denoted as $\\mathcal{H}_u'$ and $\\mathcal{H}_u''$ . Then, using the encoder described in Section 4.1.1, we obtain the user embeddings $\\mathbf{e}_u'$ and $\\mathbf{e}_u''$ corresponding to the different views. Since $\\mathbf{e}_u'$ and $\\mathbf{e}_u''$ are obtained through data augmentation of $\\mathcal{H}_u$ , they are more similar to each other. Therefore, we treat them as positive samples for each other and use the views generated from the augmented histories of other users in the same batch as negative samples. 
We then perform contrastive learning using the InfoNCE [28] loss as follows:", + "bbox": [ + 513, + 388, + 913, + 527 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {C L}} = - \\left[ \\log \\frac {\\exp \\left(\\cos \\left(\\mathbf {e} _ {u} ^ {\\prime} , \\mathbf {e} _ {u} ^ {\\prime \\prime}\\right) / \\tau_ {1}\\right)}{\\sum_ {u ^ {-} \\in \\mathcal {U} _ {\\mathrm {n e g}}} \\exp \\left(\\cos \\left(\\mathbf {e} _ {u} ^ {\\prime} , \\mathbf {e} _ {u ^ {-}} ^ {\\prime \\prime}\\right) / \\tau_ {1}\\right)} \\right. \\tag {2} \\\\ \\left. + \\log \\frac {\\exp \\left(\\cos \\left(\\mathbf {e} _ {u} ^ {\\prime} , \\mathbf {e} _ {u} ^ {\\prime \\prime}\\right) / \\tau_ {1}\\right)}{\\sum_ {u ^ {-} \\in \\mathcal {U} _ {\\text {n e g}}} \\exp \\left(\\cos \\left(\\mathbf {e} _ {u ^ {-}} ^ {\\prime}, \\mathbf {e} _ {u} ^ {\\prime \\prime}\\right) / \\tau_ {1}\\right)} \\right], \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 532, + 911, + 608 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\tau_{1}$ is the temperature coefficient, $\\mathcal{U}_{\\mathrm{neg}}$ are the set of randomly sampled in-batch negative samples, and $\\cos (\\cdot)$ denotes the cosine similarity.", + "bbox": [ + 513, + 613, + 915, + 655 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1.4 Top- $m$ User Retrieval. After training with contrastive learning, we can use the encoder from Section 4.1.1 to obtain the user embedding $\\mathbf{e}_u$ . We then calculate the cosine similarity between each pair of user embeddings and retrieve the top- $m$ most similar users $\\mathcal{U}_{\\mathrm{retrieved}} = \\{u_1, u_2, \\dots, u_m\\}$ for user $u$ . Subsequently, the histories of these $m$ users will be used for further document retrieval.", + "bbox": [ + 513, + 662, + 915, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2 Document Retrieval", + "text_level": 1, + "bbox": [ + 514, + 758, + 723, + 772 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After retrieving the top- $m$ users, we design a personalized retriever to retrieve the top- $k$ documents from each user's history, resulting in a total of $m \\times k$ candidate documents $\\mathcal{D}_{\\text{retrieved}} = \\{d_{i,j} | i \\in \\{1, \\ldots, m\\}, j \\in \\{1, \\ldots, k\\}\\}$ . This section introduces how the retriever is designed and how it's trained to retrieve documents that better align with the requirements of personalized LLM generation.", + "bbox": [ + 513, + 776, + 915, + 861 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2.1 Retriever. First, we use a pre-trained dense retrieval model (such as BGE retriever [45]) to compute the semantic relevance", + "bbox": [ + 513, + 867, + 913, + 896 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy.", + "bbox": [ + 83, + 75, + 279, + 85 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Teng Shi et al.", + "bbox": [ + 841, + 75, + 911, + 85 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/92815eb6c2f51b4c9ef6ca63b04dd6a8f815dc0d32224bff35a685479dc7ee07.jpg", + "image_caption": [ + "Figure 4: The method of training the retriever and reranker using LLM feedback." 
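As a concrete reference for Eq. (2), a minimal in-batch implementation of the symmetric InfoNCE objective is sketched below. It assumes the standard formulation in which the positive pair also appears in the denominator, which is one common reading of $\mathcal{U}_{\mathrm{neg}}$; function names are ours.

```python
import torch
import torch.nn.functional as F

def info_nce(e1, e2, tau1=0.1):
    # e1, e2: (B, d) user embeddings of two augmented views; row i of both is the same user,
    # so every other row in the batch serves as an in-batch negative u^-.
    z1 = F.normalize(e1, dim=-1)
    z2 = F.normalize(e2, dim=-1)
    logits = z1 @ z2.t() / tau1                       # cos(e'_i, e''_j) / tau_1
    labels = torch.arange(z1.size(0), device=z1.device)
    # The two cross-entropy terms mirror the two log terms of Eq. (2).
    return F.cross_entropy(logits, labels) + F.cross_entropy(logits.t(), labels)
```

After training, the encoder's $\mathbf{e}_u$ is what Section 4.1.4 compares by cosine similarity to fetch the top-$m$ users.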
+ ], + "image_footnote": [], + "bbox": [ + 89, + 104, + 473, + 231 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "between the query and the candidate documents:", + "bbox": [ + 81, + 280, + 383, + 294 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {q, d} ^ {\\text {r e t r i e v e r}} = \\cos \\left(\\operatorname {E n c o d e r} _ {q} (q), \\operatorname {E n c o d e r} _ {d} (d)\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 301, + 482, + 321 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathrm{Encoder}_q(\\cdot)\\to \\mathbb{R}^d$ and $\\mathrm{Encoder}_d(\\cdot)\\rightarrow \\mathbb{R}^d$ are the encoders for the query and the document in the retrieval model, respectively. Pre-trained retrieval models typically use $S_{q,d}^{\\mathrm{retriever}}$ directly for retrieval. However, $S_{q,d}^{\\mathrm{retriever}}$ only considers the semantic relevance between the query and the document. Since different users might input the same query but expect different outputs due to their varying preferences, we further account for user personalization by calculating the preference score of the user for the document as follows:", + "bbox": [ + 81, + 330, + 483, + 462 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {u, d} ^ {\\text {r e t r i e v e r}} = \\cos \\left(\\mathrm {M L P} _ {1} \\left(\\mathbf {e} _ {u}\\right), \\operatorname {E n c o d e r} _ {d} (d)\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 470, + 482, + 489 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathrm{MLP}_1: \\mathbb{R}^d \\to \\mathbb{R}^d$ is a multi-layer perceptron that maps the user embedding to the space where the cosine similarity is computed. $\\mathbf{e}_u$ is the embedding obtained in Section 4.1.1. The total score for retrieval is computed as follows:", + "bbox": [ + 81, + 497, + 482, + 555 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {u, q, d} ^ {\\text {r e t r i e v e r}} = (1 - \\alpha) S _ {q, d} ^ {\\text {r e t r i e v e r}} + \\alpha S _ {u, d} ^ {\\text {r e t r i e v e r}}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 160, + 561, + 482, + 582 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\alpha$ is a hyper-parameter that controls the weight of personalization.", + "bbox": [ + 81, + 588, + 483, + 617 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2.2 Training. Since the pre-trained dense retrieval model is not fine-tuned for our specific task, the retrieved results may not necessarily lead to LLM responses that better match the target output $y$ [25, 35]. However, there is no ground truth indicating which documents are better. Therefore, we evaluate the difference between the LLM's output and the target output $y$ , using this as a label to train the retrieval model. Figure 4 shows the process of training the retriever using LLM feedback.", + "bbox": [ + 81, + 625, + 482, + 734 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Specifically, we first use the pre-trained retrieval model to retrieve the top- $k$ documents from each of the $m$ users' histories based on $S_{q,d}^{\\mathrm{retriever}}$ in Eq. (3), resulting in a total of $m \\times k$ candidate documents. 
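To make Eqs. (3)–(5) concrete, here is a minimal sketch of the combined retrieval score. The query and document embeddings are assumed to come from the frozen dense retriever's encoders and $\mathbf{e}_u$ from the user encoder above; the exact architecture of $\mathrm{MLP}_1$ is our assumption (the paper only specifies a $d \to d$ mapping).

```python
import torch.nn as nn
import torch.nn.functional as F

d = 768
mlp1 = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))  # MLP_1 in Eq. (4)

def retriever_score(q_emb, d_emb, e_u, alpha=0.5):
    # q_emb: (d,) = Encoder_q(q); d_emb: (M, d) = Encoder_d(d) for M candidates; e_u: (d,)
    s_qd = F.cosine_similarity(q_emb.unsqueeze(0), d_emb, dim=-1)      # Eq. (3)
    s_ud = F.cosine_similarity(mlp1(e_u).unsqueeze(0), d_emb, dim=-1)  # Eq. (4)
    return (1 - alpha) * s_qd + alpha * s_ud                           # Eq. (5)
```

Here `alpha=0.5` is only a placeholder; Section 5.1.4 tunes $\alpha$ within $[0.01, 1.0]$.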
These documents are then concatenated with the query one by one and used as prompts for the LLM, producing $m \\times k$ outputs:", + "bbox": [ + 81, + 736, + 482, + 821 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\{O _ {q, d _ {i, j}} = \\mathrm {L L M} (q, d _ {i, j}) | i \\in \\{1, \\dots , m \\}, j \\in \\{1, \\dots , k \\} \\},\n$$\n", + "text_format": "latex", + "bbox": [ + 122, + 830, + 442, + 848 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathrm{LLM}(q, d_{i,j})$ represents the output generated by inputting the concatenated query $q$ and document $d_{i,j}$ into the LLM. Then, based on the quality of these outputs, we can calculate the distribution of", + "bbox": [ + 81, + 854, + 483, + 898 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "these candidate documents as follows:", + "bbox": [ + 514, + 106, + 750, + 119 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\text {L L M}} \\left(d _ {i, j} \\mid q, y\\right) = \\frac {\\exp (\\operatorname {e v a l} \\left(y , O _ {q , d _ {i , j}}\\right))}{\\sum_ {i = 1} ^ {m} \\sum_ {j = 1} ^ {k} \\exp (\\operatorname {e v a l} \\left(y , O _ {q , d _ {i , j}}\\right))}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 125, + 911, + 162 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\text{eval}(\\cdot)$ measures the difference between the target output $y$ and the LLM's output, using metrics such as ROUGE [24] score. A larger value returned by $\\text{eval}(\\cdot)$ indicates a better-generated result. Similarly, we can also calculate the score distribution of the candidate documents by the retrieval model based on $S_{u,q,d}^{\\text{retriever}}$ in Eq. (5):", + "bbox": [ + 513, + 167, + 913, + 253 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\text {r e t r i e v e r}} \\left(d _ {i, j} \\mid q, u\\right) = \\frac {\\exp \\left(S _ {u , q , d _ {i , j}} ^ {\\text {r e t r i e v e r}}\\right)}{\\sum_ {i = 1} ^ {m} \\sum_ {j = 1} ^ {k} \\exp \\left(S _ {u , q , d _ {i , j}} ^ {\\text {r e t r i e v e r}}\\right)}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 570, + 258, + 911, + 304 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We aim for the retrieval model to retrieve documents that lead to better LLM-generated results, which means making the distribution $p_{\\mathrm{retriever}}(d|q,u)$ in Eq. (7) closer to the distribution $p_{\\mathrm{LLM}}(d|q,y)$ in Eq (6). Therefore, we compute the KL divergence between the two distributions as the loss to optimize the retriever:", + "bbox": [ + 513, + 308, + 913, + 378 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {r e t r i e v e r}} = \\mathrm {K L} \\left(p _ {\\text {r e t r i e v e r}} (d | q, u) \\mid p _ {\\text {L L M}} (d | q, y)\\right). 
\\tag {8}\n$$\n", "text_format": "latex", "bbox": [ 566, 385, 911, 401 ], "page_idx": 4 }, { "type": "text", "text": "4.3 Document Rerank", "text_level": 1, "bbox": [ 514, 412, 709, 426 ], "page_idx": 4 }, { "type": "text", "text": "After retrieving $\mathcal{D}_{\mathrm{retrieved}}$ through the retriever, in this section, we further refine the results by reranking $\mathcal{D}_{\mathrm{retrieved}}$ to obtain the final top- $k$ ranked results $\mathcal{D}_{\mathrm{reranked}} = \{d_i | i \in \{1, \dots, k\}\}$ .", "bbox": [ 513, 431, 913, 474 ], "page_idx": 4 }, { "type": "text", "text": "4.3.1 Reranker. We use a pre-trained cross-encoder (such as the BGE reranker [45]) to encode the query and document, obtaining the hidden state corresponding to the [CLS] token from the last layer:", "bbox": [ 513, 481, 911, 535 ], "page_idx": 4 }, { "type": "equation", "text": "\n$$\n\mathbf{h}_{q, d} = \operatorname{CrossEncoder}(q, d), \tag{9}\n$$\n", "text_format": "latex", "bbox": [ 630, 539, 911, 556 ], "page_idx": 4 }, { "type": "text", "text": "where $\mathbf{h}_{q,d} \in \mathbb{R}^d$ . Similarly, when reranking, in addition to considering the semantic relevance between query and document, we also take into account the user's personalized preferences. However, since the cross-encoder does not encode documents separately, it cannot compute the cosine similarity between users and documents as shown in Eq. (4) to express the user preference score. Therefore, we directly concatenate the user embedding with the output of the cross-encoder to account for the influence of user preferences. The overall score used for reranking is calculated as follows:", "bbox": [ 511, 560, 913, 686 ], "page_idx": 4 }, { "type": "equation", "text": "\n$$\nS_{u, q, d}^{\text{reranker}} = \mathrm{MLP}_{3}\left(\operatorname{CONCAT}\left(\mathbf{h}_{q, d}, \mathrm{MLP}_{2}(\mathbf{e}_{u})\right)\right), \tag{10}\n$$\n", "text_format": "latex", "bbox": [ 571, 691, 911, 712 ], "page_idx": 4 }, { "type": "text", "text": "where $\mathrm{MLP}_2: \mathbb{R}^d \to \mathbb{R}^d$ and $\mathrm{MLP}_3: \mathbb{R}^{2d} \to \mathbb{R}$ are two multi-layer perceptrons. $\mathrm{CONCAT}(\cdot)$ denotes the concatenation operation.", "bbox": [ 511, 719, 911, 750 ], "page_idx": 4 }, { "type": "text", "text": "4.3.2 Training. Similar to the retriever's training in Section 4.2.2, we also want the reranker to assign higher scores to the documents that lead to better LLM-generated results. Therefore, we train the reranker using a similar approach.", "bbox": [ 513, 757, 911, 811 ], "page_idx": 4 }, { "type": "text", "text": "We use the trained retrieval model from Section 4.2.2 to retrieve top- $k$ documents from the history of each of the $m$ users, resulting in a total of $m \times k$ candidate documents. These documents are concatenated with the query $q$ and used as prompts for the LLM, producing $m \times k$ outputs. Similar to Eq. (6), we can obtain the distribution $p_{\mathrm{LLM}}(d|q,y)$ of these candidate documents. 
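For reference, the reranker score of Eq. (10) and the feedback losses of Eqs. (8) and (12) can be sketched as follows. This is an illustration under stated assumptions: the MLP architectures are ours, the `eval` scores (e.g., ROUGE between $y$ and each $O_{q,d_{i,j}}$) are assumed precomputed, and the same `feedback_kl` is reused for the retriever (with Eqs. (6)–(7)) and the reranker (with Eq. (11)).

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

d = 768
mlp2 = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))      # MLP_2 in Eq. (10)
mlp3 = nn.Sequential(nn.Linear(2 * d, d), nn.ReLU(), nn.Linear(d, 1))  # MLP_3 in Eq. (10)

def reranker_score(h_qd, e_u):
    # h_qd: (M, d) [CLS] states from the cross-encoder for the M = m*k candidates; e_u: (d,)
    u = mlp2(e_u).expand_as(h_qd)                           # broadcast MLP_2(e_u) over candidates
    return mlp3(torch.cat([h_qd, u], dim=-1)).squeeze(-1)   # Eq. (10), shape (M,)

def feedback_kl(model_scores, eval_scores):
    # Softmax over the M candidates gives p_model (Eq. (7)/(11)) and p_LLM (Eq. (6)).
    log_p_model = F.log_softmax(model_scores, dim=-1)
    log_p_llm = F.log_softmax(eval_scores, dim=-1)
    # KL(p_model || p_LLM), matching the direction written in Eqs. (8) and (12).
    return F.kl_div(log_p_llm, log_p_model, reduction="sum", log_target=True)
```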
Based on", + "bbox": [ + 511, + 811, + 913, + 896 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation", + "bbox": [ + 83, + 75, + 532, + 87 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy.", + "bbox": [ + 718, + 75, + 911, + 87 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/2949233b012ca5dc6bde7e95580b620544ae0db053562b8a4fa23243c5566927.jpg", + "table_caption": [ + "Table 1: Statistics of the datasets used in this paper." + ], + "table_footnote": [], + "table_body": "
DatasetLaMP-1LaMP-2LaMP-3LaMP-4LaMP-5LaMP-7
#Users6,54292920,0001,64314,68213,437
#Train6,5425,07320,00012,50014,68213,437
#Dev1,5001,4102,5001,5001,5001,498
#Test1,5001,5572,5001,8001,5001,500
", + "bbox": [ + 89, + 125, + 477, + 199 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$S_{u,q,d}^{\\mathrm{reranker}}$ in Eq. (10), we can also get the score distribution of the candidate documents by the reranker:", + "bbox": [ + 81, + 213, + 480, + 246 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\text {r e r a n k e r}} \\left(d _ {i, j} \\mid q, u\\right) = \\frac {\\exp \\left(S _ {u , q , d _ {i , j}} ^ {\\text {r e r a n k e r}}\\right)}{\\sum_ {i = 1} ^ {m} \\sum_ {j = 1} ^ {k} \\exp \\left(S _ {u , q , d _ {i , j}} ^ {\\text {r e r a n k e r}}\\right)}. \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 252, + 482, + 296 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We compute the KL divergence between distributions $p_{\\mathrm{reranker}}(d|q,u)$ and $p_{\\mathrm{LLM}}(d|q,y)$ as the loss to optimize the reranker:", + "bbox": [ + 81, + 301, + 488, + 330 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {r e r a n k e r}} = \\mathrm {K L} \\left(p _ {\\text {r e r a n k e r}} (d | q, u) \\mid p _ {\\text {L L M}} (d | q, y)\\right). \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 338, + 482, + 354 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The loss allows the reranker to assign higher scores to documents that enable better personalized generation by the LLM.", + "bbox": [ + 81, + 359, + 482, + 388 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4 Discussion", + "text_level": 1, + "bbox": [ + 83, + 401, + 217, + 416 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Computational Efficiency. CFRAG comprises three modules. The User Encoder is a lightweight, single-layer Transformer with inputs derived from a frozen BGE embedding (dimension 768), resulting in minimal parameter overhead. The retriever and reranker are comparable in size to BERT (approximately 100M parameters). Overall, the training cost is low due to the modest parameter size. During inference, user and document embeddings can be precomputed, requiring only similarity calculations for retrieval, ensuring minimal computational cost. This efficiency enables our method to generalize quickly to new datasets.", + "bbox": [ + 81, + 420, + 482, + 559 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Experiments", + "text_level": 1, + "bbox": [ + 83, + 573, + 218, + 588 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conducted experiments to evaluate the performance of CFRAG. The source code is available.", + "bbox": [ + 81, + 590, + 482, + 617 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 83, + 632, + 290, + 648 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1.1 Dataset. We conducted experiments on the Language Model Personalization (LaMP) [32] benchmark, which consists of seven personalized text generation tasks. We excluded LaMP-6 because its data is not publicly available. The remaining tasks include: LaMP-1 (Personalized Citation Identification); LaMP-2 (Personalized Movie Tagging); LaMP-3 (Personalized Product Rating); LaMP-4 (Personalized News Headline Generation); LaMP-5 (Personalized Scholarly Title Generation); LaMP-7 (Personalized Tweet Paraphrasing). We used the time-based split provided by LaMP to divide the data into training, validation, and test sets. 
The statistics of these datasets are shown in Table 1.", "bbox": [ 81, 650, 482, 801 ], "page_idx": 5 }, { "type": "text", "text": "5.1.2 Evaluation Metrics. Following previous works [31, 32], we evaluate Accuracy and F-1 score for LaMP-1 and LaMP-2, mean absolute error (MAE) and root mean squared error (RMSE) for LaMP-3, and ROUGE-1 and ROUGE-L [24] for LaMP-4, LaMP-5, and LaMP-7.", "bbox": [ 81, 811, 482, 867 ], "page_idx": 5 }, { "type": "text", "text": "5.1.3 Baselines. In this work, we compare CFRAG with the following methods.", "bbox": [ 513, 106, 915, 133 ], "page_idx": 5 }, { "type": "text", "text": "No Personalization: We directly input the user's query into the LLM without retrieving from user history, using this as the non-personalized baseline. We refer to this method as Zero Shot.", "bbox": [ 513, 133, 911, 175 ], "page_idx": 5 }, { "type": "text", "text": "Personalized Baselines: We compared CFRAG with methods that personalize by retrieving from user history using different retrieval models, including: (1) Random selects $k$ items randomly from the user's history; (2) Recency selects the most recent $k$ items from the user's history; (3) BM25 [30] retrieves top- $k$ items from the user's history using BM25; (4) BGE [45] retrieves top- $k$ items from the user's history using the BGE retriever; (5) ROPG [31] optimizes the dense retrieval model based on the results generated by the LLM.", "bbox": [ 511, 176, 913, 287 ], "page_idx": 5 }, { "type": "text", "text": "5.1.4 Implementation Details. We conducted experiments on two LLMs: Llama3-8B-Instruct [1] and Qwen2-7B-Instruct [47]. In this paper, we do not fine-tune the LLM because fine-tuning is costly and could cause the LLM to retain user information, potentially compromising user privacy. To ensure a fair comparison, we use greedy search for text generation. The dense retrieval model used in all methods is bge-base-en-v1.5² [45]. The cross-encoder used as the reranker in Section 4.3.1 is bge-reranker-base³ [45]. All hyperparameters for the baselines are searched according to the settings in the original papers. The embedding dimension $d$ is set to 768. The number of retrieved documents $k$ is set to 5, and the number of retrieved users $m$ is tuned among $\{2,3,4,5,6\}$ . The $\mathrm{Trm}(\cdot)$ encoder in Eq. (1) has 1 layer and 2 heads. The augmentation ratios $\eta_{c}$, $\eta_{m}$, and $\eta_{r}$ used for data augmentation in Section 4.1.2 are set to 0.7, 0.3, and 0.3, respectively. The temperature parameter $\tau_{1}$ in Eq. (2) is tuned among $\{0.01, 0.1, 1\}$ . The weight $\alpha$ in Eq. (5) is tuned within $[0.01, 1.0]$ . The learning rate is tuned among $\{10^{-3}, 10^{-4}, 10^{-5}\}$ . Adam [18] is used for optimization. The data input and output formats are provided in Appendix A.", "bbox": [ 511, 294, 913, 556 ], "page_idx": 5 }, { "type": "text", "text": "5.2 Experimental Results", "text_level": 1, "bbox": [ 514, 568, 735, 584 ], "page_idx": 5 }, { "type": "text", "text": "Experimental results are shown in Table 2. From the results, we can find that:", "bbox": [ 513, 587, 911, 613 ], "page_idx": 5 }, { "type": "list", "sub_type": "text", "list_items": [ "- Firstly, compared to existing methods, CFRAG achieved the best results across six datasets in the LaMP benchmark. 
This demonstrates the effectiveness of introducing collaborative information between users into RAG and using LLM feedback to tune the retriever and reranker to ensure that they can retrieve the documents that support personalized LLM generation.", "- Secondly, we can observe that even randomly selecting user history outperforms the zero-shot method without any user history. This highlights the importance of incorporating user history to reflect user preferences for personalized generation. Additionally, we observe that retrieval methods perform better than simply selecting the most recent user history, underscoring the importance of retrieval.", "- Thirdly, we also observe that, in most cases, RAG and ROPG methods using dense retrieval models outperform BM25. Additionally, CFRAG, which fine-tunes the retriever based on LLM feedback, achieves better results. This shows, on the one hand, that the better the retriever, the better the generation results, and on the other" ], "bbox": [ 513, 614, 913, 864 ], "page_idx": 5 }, { "type": "header", "text": "SIGIR '25, July 13-18, 2025, Padua, Italy.", "bbox": [ 84, 75, 279, 85 ], "page_idx": 5 }, { "type": "header", "text": "Teng Shi et al.", "bbox": [ 841, 75, 911, 85 ], "page_idx": 5 }, { "type": "page_footnote", "text": "$^{2}$ https://huggingface.co/BAAI/bge-base-en-v1.5", "bbox": [ 514, 872, 746, 883 ], "page_idx": 5 }, { "type": "page_footnote", "text": "$^{3}$ https://huggingface.co/BAAI/bge-reranker-base", "bbox": [ 514, 883, 748, 895 ], "page_idx": 5 }, { "type": "footer", "text": "$^{1}$ https://github.com/TengShi-RUC/CFRAG", "bbox": [ 84, 883, 285, 895 ], "page_idx": 5 }, { "type": "table", "img_path": "images/7082bea53d532e5d94a2453a6bded23a4f6f523ae049809dfc0d039c880c149f.jpg", "table_caption": [ "Table 2: Comparison of the performance of CFRAG with other approaches on the LaMP benchmark. $\uparrow$ indicates that a higher value for the corresponding metric is better, while $\downarrow$ indicates that a lower value is better. The best and the second-best methods are highlighted in bold and underlined fonts, respectively. “*” indicates improvements over the second-best methods are statistically significant ( $t$ -test, $p$ -value $< 0.05$ )." ], "table_footnote": [], "table_body": "
LLMsRetrieversLaMP-1LaMP-2LaMP-3LaMP-4LaMP-5LaMP-7
Accuracy ↑F1 ↑Accuracy ↑F1 ↑MAE ↓RMSE ↓ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑
Llama3Zero Shot0.49930.24970.29930.02000.50240.79040.14060.12280.44170.36500.30790.2593
Random0.57400.28700.39290.02620.41040.78330.17870.15710.45330.38750.31370.2508
Recency0.60400.30200.39930.02660.39800.74910.18560.16500.45730.39280.33250.2686
BM25 [30]0.62400.31200.42550.02840.40600.76660.18030.15910.46370.39780.34490.2780
BGE [45]0.63270.31630.45740.03050.35280.69690.18110.16110.46380.39580.33910.2742
ROPG [31]0.64400.32200.46810.03120.34560.69220.18380.16340.46380.39560.35300.2881
CFRAG0.6533*0.3267*0.5340*0.0356*0.2812*0.5997*0.1957*0.1745*0.4810*0.4153*0.3752*0.3055*
Qwen2Zero Shot0.50000.25000.29080.01940.44440.78050.12640.10810.41440.34680.39720.3229
Random0.56330.28170.32840.02190.40000.76210.15810.13770.45800.39210.42910.3564
Recency0.57730.28870.33260.02220.39120.75630.15810.13690.45620.39130.42470.3525
BM25 [30]0.59870.29930.35320.02350.42280.80270.15800.13740.46130.39500.42900.3570
BGE [45]0.60800.30400.36740.02450.36960.72110.16130.13980.45710.39100.43470.3605
ROPG [31]0.60930.30470.38300.02550.36720.73320.16170.14010.46000.39460.43450.3610
CFRAG0.61330.30670.3957*0.02640.3536*0.7071*0.16210.14120.4703*0.4029*0.4425*0.3708*
", + "bbox": [ + 91, + 169, + 901, + 351 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4d1923b82576cd1772eb0abb7c69d9b2c56470581b24e15f43d202ce4b7dd758.jpg", + "table_caption": [ + "Table 3: Ablation Study of CFRAG on LaMP based on Llama3. \"MEAN\" represents using the average of user history document embeddings as the user embedding. \"w/o\" indicates the corresponding module in CFRAG is removed." + ], + "table_footnote": [], + "table_body": "
VariantsLaMP-1LaMP-2LaMP-3LaMP-4LaMP-5LaMP-7
#ModelAccuracy ↑F1 ↑Accuracy ↑F1 ↑MAE ↓RMSE ↓ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑
(0)CFRAG0.65330.32670.53400.03560.28120.59970.19570.17450.48100.41530.37520.3055
(1)w/o User Retrieval0.64000.32000.49360.03290.34440.69250.19140.16890.46420.39630.35660.2903
(2)User Retrieval (MEAN)0.64200.32100.50640.03380.34120.68670.18470.16390.47790.41130.37220.3022
(3)w/o Retriever Tuning0.64530.32270.49790.03320.28520.60700.19160.17040.47420.40480.35990.2940
(4)w/o $S_{u,d}^{\mathrm{retriever}}$ in Eq. (5)0.63330.31670.51130.03410.33240.68610.18950.16960.47500.40880.37320.3039
(5)w/o Reranker Tuning0.63070.31530.46950.03130.36960.73920.17660.15500.47140.40680.34320.2775
(6)w/o $\mathbf{e}_u$ in Eq. (10)0.63130.31570.49930.03330.34200.69250.18870.16720.47720.41230.37310.3030
", + "bbox": [ + 91, + 388, + 903, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "hand, fine-tuning the retriever based on LLM feedback to ensure it can retrieve the documents that meet the personalized generation needs of LLM is crucial.", + "bbox": [ + 81, + 508, + 482, + 550 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3 Ablation Study", + "text_level": 1, + "bbox": [ + 83, + 570, + 250, + 585 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conducted an ablation study to investigate the effectiveness of different modules in CFRAG, as shown in Table 3. CFRAG consists of three modules: User Retrieval, Document Retrieval, and Document Rerank. We removed different modules from CFRAG one by one to verify the effectiveness of each module.", + "bbox": [ + 81, + 588, + 482, + 657 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3.1 User Retrieval. First, we validated the effectiveness of introducing collaborative information by retrieving similar users, as shown in row (1) of Table 3. It can be seen that without retrieving similar users and only retrieving from the current user's history, the performance is worse than that of CFRAG, highlighting the importance of collaborative information.", + "bbox": [ + 81, + 672, + 482, + 756 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also validated the effectiveness of training user embeddings using contrastive learning. For comparison, we directly averaged the document embeddings from the user's history to create user embeddings for retrieval, as shown in row (2) of Table 3. It can be seen that CFRAG, which uses user embeddings trained with contrastive learning, achieves better results. This is because contrastive learning constructs user similarity labels through data augmentation and uses the InfoNCE loss to help the embeddings learn which users are similar. In contrast, using mean pooling directly cannot capture user similarity.", + "bbox": [ + 81, + 757, + 482, + 896 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/94633987414c00f738c3423cbe8c6e65847b434df8ed4ae5565aeb3a005e42e8.jpg", + "image_caption": [ + "(a) LaMP-1" + ], + "image_footnote": [], + "bbox": [ + 521, + 511, + 709, + 599 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/79735191f069bd3fe577f6f68843cfd442708060861370a8d9a6cbcee6d995b3.jpg", + "image_caption": [ + "(b) LaMP-5", + "Figure 5: Results of using different methods to select users for introducing collaborative information. \"random\" indicates randomly selecting $m$ users; \"top- $(m - 2m)$ \" represents selecting users whose similarity to the current user ranks between $m$ and $2m$ ; \"top- $m$ \" indicates selecting the most similar $m$ users." + ], + "image_footnote": [], + "bbox": [ + 718, + 511, + 906, + 599 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3.2 Document Retrieval. We also validated the effectiveness of the personalized retriever we designed, as shown in Table 3, rows (3) and (4). First, in row (3), we can see that without fine-tuning based on LLM feedback, using a pre-trained dense retrieval model leads to worse performance. This indicates that retrieval cannot be based solely on semantic relevance, ensuring that the retrieved documents support personalized LLM generation is crucial. Additionally, we analyzed the impact of removing $S_{u,d}^{\\mathrm{retriever}}$ from Eq. (4) and only using $S_{q,d}^{\\mathrm{retriever}}$ from Eq. (3) for retrieval, as indicated in row (4). 
The results decreased, demonstrating that users' personalized preferences should also be considered during retrieval, rather", + "bbox": [ + 511, + 736, + 915, + 896 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation", + "bbox": [ + 83, + 75, + 532, + 87 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy.", + "bbox": [ + 718, + 75, + 913, + 87 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/86faea8a327cef5e2b9ac66cf7e276f3ee4ebff4fccc9a5f5db5d20515b61ee6.jpg", + "image_caption": [ + "(a) LaMP-1" + ], + "image_footnote": [], + "bbox": [ + 89, + 108, + 276, + 196 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e096674f0f80370de56434a59eb14a7c664e0ab43e4a24769d974004128917ef.jpg", + "image_caption": [ + "(b) LaMP-5" + ], + "image_footnote": [], + "bbox": [ + 285, + 108, + 475, + 196 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0fe4c17f7d3b0064ee98d9689518a2b9f9a84baea0031c9d564a3783a1b2fd6b.jpg", + "image_caption": [ + "Figure 6: Results using different retrievers and rerankers. \"BM25\" indicates using BM25 as both the retriever and reranker, while \"w/o Tuning\" refers to using pre-trained retrievers and rerankers without LLM feedback fine-tuning.", + "(a) LaMP-1", + "Figure 7: Performance under different numbers of retrieved documents from the current user $u$ 's history in the top- $k$ documents." + ], + "image_footnote": [], + "bbox": [ + 89, + 297, + 277, + 386 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/20182a7db51c3fa3024198281989434352d72122a26392fd7dce39da64ecec9d.jpg", + "image_caption": [ + "(b) LaMP-5" + ], + "image_footnote": [], + "bbox": [ + 287, + 297, + 475, + 386 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "than solely focusing on the semantic relevance between the query and documents.", + "bbox": [ + 81, + 474, + 482, + 502 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3.3 Document Rerank. We also validated the effectiveness of the personalized reranker we designed, as shown in Table 3, rows (5) and (6). First, in row (5), it can be seen that using a pre-trained reranker leads to worse results, highlighting the importance of fine-tuning based on LLM feedback. We also observed the effect of removing $\\mathbf{e}_u$ from Eq. (10) and only using $\\mathbf{h}_{q,d}$ to calculate $S_{q,d}^{\\text{reranker}}$ for ranking, as indicated in row (6). The results decreased in this case, highlighting the importance of considering users' personalized preferences in the reranker.", + "bbox": [ + 81, + 508, + 482, + 637 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4 Experimental Analysis", + "text_level": 1, + "bbox": [ + 83, + 648, + 313, + 665 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As mentioned in Section 1, adapting collaborative filtering into personalized RAG faces two challenges. Challenge 1: How to introduce collaborative information? Challenge 2: How to retrieve documents that support personalized LLM generation? In this section, we conduct experimental analysis to further demonstrate the effectiveness of our method in addressing these two challenges. Additionally, we provide further analysis of the results of CFRAG and the impact of hyper-parameters. 
Due to space limitations, we conducted experimental analysis on the LaMP-1 and LaMP-5 datasets.", + "bbox": [ + 81, + 667, + 482, + 792 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4.1 Effectiveness of User Retrieval using Contrastive Learning (Challenge 1). As described in Section 1, to address Challenge 1, we train user embeddings using contrastive learning to retrieve the top- $m$ most similar users for introducing collaborative information. To validate the effectiveness of this approach, we compared it with randomly selecting $m$ users and selecting users from top- $m$ to $2m$ , as shown in Figure 5. First, we can see that randomly selecting", + "bbox": [ + 81, + 797, + 482, + 896 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9c481f693eb54eb4efbf0d5c0f2af5aa8f4d150f073794b47f3ad1f2fb237cf5.jpg", + "image_caption": [ + "(a) LaMP-1" + ], + "image_footnote": [], + "bbox": [ + 522, + 109, + 709, + 196 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/acb69f3db4e448b0e25bc42ebbf3669d4e4b97072a06599b923f91b4cb5074dc.jpg", + "image_caption": [ + "(b) LaMP-5" + ], + "image_footnote": [], + "bbox": [ + 718, + 108, + 906, + 196 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/83cde8aa2ad4ce4f601933f54a812e13703cb17f9d80156a716ccbf53ac36f6e.jpg", + "image_caption": [ + "Figure 8: Performance under different numbers of retrieved users. The performance is the worst since no collaborative information is introduced when $m = 1$ .", + "(a) LaMP-1", + "Figure 9: Performance under different numbers of retrieved documents per user." + ], + "image_footnote": [], + "bbox": [ + 521, + 290, + 707, + 378 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/208072a2dd51268962130922f22ad7a74e36f3ec865e34dc65a393a56cbe2a6c.jpg", + "image_caption": [ + "(b) LaMP-5" + ], + "image_footnote": [], + "bbox": [ + 718, + 290, + 906, + 378 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "users yields the worst performance, indicating that collaborative information cannot be introduced indiscriminately. Secondly, the results show that retrieving users from the range of top- $m$ to $2m$ performs worse than using the top- $m$ users, suggesting that information from users who are more similar to the current user $u$ is more important. These highlight the importance of retrieving the most similar top- $m$ users", + "bbox": [ + 511, + 462, + 913, + 559 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4.2 Effectiveness of Document Retrieval using LLM Feedback (Challenge 2). As mentioned in Section 1, to address Challenge 2, we fine-tune the retriever and reranker using feedback from the content generated by the LLM, enabling them to retrieve documents that better meet personalized LLM generation needs. To validate its effectiveness, we compared the results with those using retrievers and rerankers without LLM feedback fine-tuning, as well as using BM25 as the retriever and reranker, as shown in Figure 6. It can be observed that CFRAG performs the best, highlighting the importance of fine-tuning with LLM feedback rather than relying solely on semantic relevance.", + "bbox": [ + 511, + 568, + 915, + 719 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4.3 Impact of the Number of Documents from the Current User. 
To further validate that CFRAG enhances personalization by incorporating collaborative information, we observed the impact of the number of documents from the current user in the final top- $k$ documents on the results, as shown in Figure 7. We varied the number of documents retrieved from the current user's history in the top- $k$ documents from 0 to 5, with the remaining documents retrieved from similar users' histories. The results indicate that retrieving only from the current user's history leads to poor performance, while appropriately retrieving documents from similar users' histories significantly improves the results. This verifies the importance of incorporating collaborative information.", + "bbox": [ + 511, + 729, + 915, + 896 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy.", + "bbox": [ + 84, + 75, + 279, + 85 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Teng Shi et al.", + "bbox": [ + 841, + 75, + 911, + 85 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/6cea494ce69dd19e4155bbac2bdb81b8052ffdd33a3f5e4661fcc323a21cbc4f.jpg", + "table_caption": [ + "Table 4: The format of input, output, and user history for different datasets in the LaMP [32] benchmark. In the input, $\\{history_{i}\\}$ will be replaced by the retrieved $i$ -th history, and each history is represented as shown in the \"User History\" column. The other italicized text in the input is replaced with the user's input. For text generation tasks, to ensure that the LLM does not generate irrelevant information, we instruct the LLM in the input to generate in JSON format, and then we extract the LLM's prediction from the JSON-formatted output." + ], + "table_footnote": [], + "table_body": "
TaskInputOutputUser History
LaMP-1The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please choose one of the following two references that is more relevant to the user's input title: [1] {reference1}; [2] {reference2}. Please just answer with “[1]” or “[2]” without explanation. “title”: {title}.[1]“title”: {title}\n“abstract”: {abstract}
LaMP-2The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please select the tag from [sci-fi, based on a book, comedy ... ] that is most relevant to the user's input description. Please just answer with the tag name without explanation. “description”: {description}; “tag”:comedy“description”: {description};\n“tag”: {tag}
LaMP-3The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, what is the score of the following review on a scale of 1 to 5? just answer with 1, 2, 3, 4, or 5 without further explanation. “review”: {review}; “score”:5“review”: {review}\n“score”: {score}
LaMP-4The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please generate a title for the given user's input text. Please generate it in the following format: {"title": "generated title"} without explanation, and use only English. “text”: {text}; “title”:{“title”: Finding Happiness \nAfter Divorce - It Can Happen}“text”: {text}\n“title”: {title}
LaMP-5The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please generate a title for the given user's input abstract. Please generate it in the following format: {"title": "generated title"} without explanation, and use only English. “abstract”: {abstract}; “title”:{“title”: Link-Reliability Based \nTwo-Hop Routing for \nWireless Sensor Networks.}“abstract”: {abstract}\n“title”: {title}
LaMP-7The historical profiles are as follows: {history1} ... {historyk}. \nBased on the style pattern of the historical tweets provided, please paraphrase the user's input tweet without any explanation before or after it. Please generate it in the following format: {"tweet": "generated tweet"} without explanation, and use only English. “tweet”: {tweet}.{“tweet”:lilxcutiesworld the \ndanny picture is GOOD!! \nI really like it.}“tweet”: {tweet}
", + "bbox": [ + 93, + 178, + 906, + 621 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.4.4 Impact of the Number of Retrieved Users. Since we enhance personalized text generation by introducing collaborative filtering, we further explored how much collaborative information to introduce, specifically the impact of the number of retrieved users on the results, as shown in Figure 8. In LaMP-1, retrieving too few or too many users leads to poorer performance, with the best results at 4 users. In LaMP-5, the performance improves as the number of users increases. This highlights the importance of introducing collaborative filtering, but it also indicates that excessive introduction can lead to decreased effectiveness.", + "bbox": [ + 81, + 631, + 482, + 770 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.4.5 Impact of the Number of Retrieved Documents. We also analyzed the impact of the number of retrieved documents, $k$ , on the results, as shown in Figure 9. It can be observed that as the number of retrieved documents increases, performance improves, indicating the importance of retrieving user history to reflect user preferences for enhancing LLM-generated results. Since more documents lead to longer prompts and slower LLM generation, we chose $k = 5$ for our experiments.", + "bbox": [ + 81, + 785, + 482, + 896 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 514, + 630, + 638, + 643 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we propose CFRAG, which adapts collaborative filtering into RAG to personalize LLMs. To introduce collaborative information without explicit user labels and retrieve documents that support personalized LLM generation, we first train user embeddings through contrastive learning to retrieve similar users. Then, we design the personalized retriever and reranker that considers user preferences during retrieval and fine-tune them using LLM feedback. The results on the Language Model Personalization (LaMP) benchmark validate the effectiveness of CFRAG. The experimental analysis also confirms the effectiveness of each module within CFRAG.", + "bbox": [ + 511, + 648, + 915, + 800 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "A Appendix: Prompts", + "text_level": 1, + "bbox": [ + 514, + 849, + 709, + 864 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We provide detailed formats for the inputs, outputs, and user histories for the LLM across different datasets, as shown in Table 4.", + "bbox": [ + 513, + 867, + 915, + 895 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation", + "bbox": [ + 83, + 75, + 532, + 87 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy.", + "bbox": [ + 718, + 75, + 913, + 87 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 84, + 104, + 176, + 119 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] AI@Meta. 2024. Llama 3 Model Card. (2024). https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md", + "[2] Akari Asai, Zegiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. [n.d.]. Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection. 
In The Twelfth International Conference on Learning Representations.", + "[3] Sebastian Borgeaud, Arthur Mensch, et al. 2022. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, PMLR, 2206-2240.", + "[4] Jin Chen, Zheng Liu, et al. 2024. When large language models meet personalization: Perspectives of challenges and opportunities. World Wide Web 27, 4 (2024), 42.", + "[5] Sunhao Dai, Ninglu Shao, et al. 2023. Uncovering chatgpt's capabilities in recommender systems. In Proceedings of the 17th ACM Conference on Recommender Systems. 1126-1132.", + "[6] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers).", + "[7] Wenqi Fan, Yujuan Ding, Liangbo Ning, Shijie Wang, Hengyun Li, Dawei Yin, Tat-Seng Chua, and Qing Li. 2024. A Survey on RAG Meeting LLMs: Towards Retrieval-Augmented Large Language Models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 6491-6501.", + "[8] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023).", + "[9] Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning. PMLR, 3929-3938.", + "[10] Xiangnan He, Kuan Deng, Xiang Wang, Yan Li, Yongdong Zhang, and Meng Wang. 2020. Lightgcn: Simplifying and powering graph convolution network for recommendation. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval. 639-648.", + "[11] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173-182.", + "[12] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. [n.d.]. LoRA: Low-Rank Adaptation of Large Language Models. In International Conference on Learning Representations.", + "[13] Gautier Izacard and Edouard Grave. 2021. Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume. 874-880.", + "[14] Gautier Izacard, Patrick Lewis, et al. 2022. Few-shot learning with retrieval augmented language models. arXiv preprint arXiv:2208.03299 1, 2 (2022), 4.", + "[15] Ashish Jaiswal, Ashwin Ramesh Babu, Mohammad Zaki Zadeh, Debapriya Banerjee, and Fillia Makedon. 2020. A survey on contrastive self-supervised learning. Technologies 9, 1 (2020), 2.", + "[16] Joel Jang, Seungone Kim, et al. 2023. Personalized soups: Personalized large language model alignment via post-hoc parameter merging. arXiv preprint arXiv:2310.11564 (2023).", + "[17] Nikhil Kandpal, Haikang Deng, Adam Roberts, Eric Wallace, and Colin Raffel. 2023. Large language models struggle to learn long-tail knowledge. In International Conference on Machine Learning. PMLR, 15696-15707.", + "[18] Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. 
arXiv preprint arXiv:1412.6980 (2014).", + "[19] Yehuda Koren, Robert Bell, and Chris Volinsky. 2009. Matrix factorization techniques for recommender systems. Computer 42, 8 (2009), 30-37.", + "[20] Patrick Lewis, Ethan Perez, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems 33 (2020), 9459-9474.", + "[21] Cheng Li, Mingyang Zhang, Qiaozhu Mei, Yaqing Wang, Spurthi Amba Hombaiah, Yi Liang, and Michael Bendersky. 2023. Teach LLMs to Personalize-An Approach inspired by Writing Education. arXiv preprint arXiv:2308.07968 (2023).", + "[22] Junyi Li, Tianyi Tang, Wayne Xin Zhao, Jian-Yun Nie, and Ji-Rong Wen. 2024. Pre-trained language models for text generation: A survey. Comput. Surveys 56, 9 (2024), 1-39.", + "[23] Xinyu Li, Zachary C Lipton, and Liu Leqi. 2024. Personalized language modeling from personalized human feedback. arXiv preprint arXiv:2402.05133 (2024).", + "[24] Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out. 74-81.", + "[25] Xi Victoria Lin, Xilun Chen, Mingda Chen, Weijia Shi, Maria Lomeli, Richard James, Pedro Rodriguez, Jacob Kahn, Gergely Szilvasy, Mike Lewis, et al. [n.d.]. RA-DIT: Retrieval-Augmented Dual Instruction Tuning. In The Twelfth International Conference on Learning Representations.", + "[26] Yinhan Liu. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)." + ], + "bbox": [ + 86, + 122, + 482, + 896 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Sheshera Mysore, Zhuoran Lu, et al. 2023. Pearl: Personalizing large language model writing assistants with generation-calibrated retrievers. arXiv preprint arXiv:2311.09180 (2023).", + "[28] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. 2018. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018).", + "[29] Chris Richardson, Yao Zhang, Kellen Gillespie, Sudipta Kar, Arshdeep Singh, Zeynab Raeesy, Omar Zia Khan, and Abhinav Sethy. 2023. Integrating summarization and retrieval for enhanced personalization via large language models. arXiv preprint arXiv:2310.20081 (2023).", + "[30] Stephen E Robertson, Steve Walker, Susan Jones, Micheline M Hancock-Beaulieu, Mike Gatford, et al. 1995. Okapi at TREC-3. Nist Special Publication Sp 109 (1995), 109.", + "[31] Alireza Salemi, Surya Kallumadi, and Hamed Zamani. 2024. Optimization methods for personalizing large language models through retrieval augmentation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 752-762.", + "[32] Alireza Salemi, Sheshera Mysore, Michael Bendersky, and Hamed Zamani. 2023. Lamp: When large language models meet personalization. arXiv preprint arXiv:2304.11406 (2023).", + "[33] Chenglei Shen, Xiao Zhang, Teng Shi, Changshuo Zhang, Guofu Xie, and Jun Xu. 2024. A survey of controllable learning: Methods and applications in information retrieval. arXiv preprint arXiv:2407.06083 (2024).", + "[34] Teng Shi, Zihua Si, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Dewei Leng, Yanan Niu, and Yang Song. 2024. UniSAR: Modeling User Transition Behaviors between Search and Recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 
1029-1039.", + "[35] Weijia Shi, Sewon Min, Michihiro Yasunaga, Minjoon Seo, Richard James, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. 2024. REPLUG: Retrieval-Augmented Black-Box Language Models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 8364-8377.", + "[36] Zhaoxuan Tan, Zheyuan Liu, and Meng Jiang. 2024. Personalized Pieces: Efficient Personalized Large Language Models through Collaborative Efforts. arXiv preprint arXiv:2406.10471 (2024).", + "[37] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2024. Democratizing Large Language Models via Personalized Parameter-Efficient Fine-tuning. arXiv:2402.04401 [cs.CL] https://arxiv.org/abs/2402.04401", + "[38] Jiakai Tang, Sunhao Dai, Teng Shi, Jun Xu, Xu Chen, Wen Chen, Wu Jian, and Yuning Jiang. 2025. Think Before Recommend: Unleashing the Latent Reasoning Power for Sequential Recommendation. arXiv:2503.22675 [cs.IR] https://arxiv.org/abs/2503.22675", + "[39] A Vaswani. 2017. Attention is all you need. Advances in Neural Information Processing Systems (2017).", + "[40] Xiang Wang, Xiangnan He, Meng Wang, Fuli Feng, and Tat-Seng Chua. 2019. Neural graph collaborative filtering. In Proceedings of the 42nd international ACM SIGIR conference on Research and development in Information Retrieval. 165-174.", + "[41] Likang Wu, Zhi Zheng, Zhaopeng Qiu, Hao Wang, Hongchao Gu, Tingjia Shen, Chuan Qin, Chen Zhu, Hengshu Zhu, Qi Liu, et al. 2024. A survey on large language models for recommendation. World Wide Web 27, 5 (2024), 60.", + "[42] Xinghao Wu, Xuefeng Liu, Jianwei Niu, Haolin Wang, Shaojie Tang, and Guogang Zhu. 2024. FedLoRA: When Personalized Federated Learning Meets Low-Rank Adaptation. (2024).", + "[43] Zeqiu Wu, Yushi Hu, Weijia Shi, Nouha Dziri, Alane Suhr, Prithviraj Ammanabrolu, Noah A Smith, Mari Ostendorf, and Hannaneh Hajishirzi. 2024. Fine-grained human feedback gives better rewards for language model training. Advances in Neural Information Processing Systems 36 (2024).", + "[44] Zhuofeng Wu, Sinong Wang, Jiatao Gu, Madian Khabsa, Fei Sun, and Hao Ma. 2020. Clear: Contrastive learning for sentence representation. arXiv preprint arXiv:2012.15466 (2020).", + "[45] Shitao Xiao, Zheng Liu, Peitian Zhang, and Niklas Muennighoff. 2023. C-Pack: Packaged Resources To Advance General Chinese Embedding. arXiv:2309.07597 [cs.CL]", + "[46] Hong-Jian Xue, Xinyu Dai, Jianbing Zhang, Shujian Huang, and Jiajun Chen. 2017. Deep matrix factorization models for recommender systems.. In IJCAI, Vol. 17. Melbourne, Australia, 3203-3209.", + "[47] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Cheng-peng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671 (2024).", + "[48] Changshuo Zhang, Teng Shi, Xiao Zhang, Qi Liu, Ruobing Xie, Jun Xu, and Ji-Rong Wen. 2024. Modeling Domain and Feedback Transitions for Cross-Domain Sequential Recommendation. arXiv preprint arXiv:2408.08209 (2024).", + "[49] Changshuo Zhang, Teng Shi, Xiao Zhang, Yanping Zheng, Ruobing Xie, Qi Liu, Jun Xu, and Ji-Rong Wen. 2024. QAGCF: Graph Collaborative Filtering for Q&A Recommendation. arXiv preprint arXiv:2406.04828 (2024).", + "[50] Changshuo Zhang, Xiao Zhang, Teng Shi, Jun Xu, and Ji-Rong Wen. 2025. Test-Time Alignment for Tracking User Interest Shifts in Sequential Recommendation. 
arXiv:2504.01489 [cs.IR] https://arxiv.org/abs/2504.01489" + ], + "bbox": [ + 516, + 108, + 911, + 893 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy.", + "bbox": [ + 84, + 75, + 279, + 87 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Teng Shi et al.", + "bbox": [ + 841, + 75, + 911, + 87 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[51] Kepu Zhang, Teng Shi, Sunhao Dai, Xiao Zhang, Yinfeng Li, Jing Lu, Xiaoxue Zang, Yang Song, and Jun Xu. 2024. SAQRec: Aligning Recommender Systems to User Satisfaction via Questionnaire Feedback. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3165-3175.", + "[52] Xiao Zhang, Teng Shi, Jun Xu, Zhenhua Dong, and Ji-Rong Wen. 2024. Model-Agnostic Causal Embedding Learning for Counterfactually Group-Fair Recommendation. IEEE Transactions on Knowledge and Data Engineering (2024).", + "[53] Yue Zhang, Yafu Li, Leyang Cui, Deng Cai, Lemao Liu, Tingchen Fu, Xinting Huang, Enbo Zhao, Yu Zhang, Yulong Chen, et al. 2023. Siren's song in the AI ocean: a survey on hallucination in large language models. arXiv preprint arXiv:2309.01219 (2023)." + ], + "bbox": [ + 84, + 108, + 483, + 220 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[54] Wayne Xin Zhao, Jing Liu, Ruiyang Ren, and Ji-Rong Wen. 2024. Dense text retrieval based on pretrained language models: A survey. ACM Transactions on Information Systems 42, 4 (2024), 1-60.", + "[55] Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023).", + "[56] Yutao Zhu, Huaying Yuan, Shuting Wang, Jiongnan Liu, Wenhan Liu, Chenlong Deng, Haonan Chen, Zhicheng Dou, and Ji-Rong Wen. 2023. Large language models for information retrieval: A survey. arXiv preprint arXiv:2308.07107 (2023).", + "[57] Yuchen Zhuang, Haotian Sun, Yue Yu, Qifan Wang, Chao Zhang, and Bo Dai. 2024. HYDRA: Model Factorization Framework for Black-Box LLM Personalization. arXiv preprint arXiv:2406.02888 (2024)." 
+ ], + "bbox": [ + 516, + 108, + 913, + 229 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation", + "bbox": [ + 84, + 75, + 532, + 87 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy.", + "bbox": [ + 718, + 75, + 911, + 87 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_model.json b/data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_model.json new file mode 100644 index 0000000000000000000000000000000000000000..04855b23b8abd6965a2cd8214ee8d75c17c2b331 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_model.json @@ -0,0 +1,3104 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.28, + 0.061, + 0.701 + ], + "angle": 270, + "content": "arXiv:2504.05731v1 [cs.IR] 8 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.101, + 0.915, + 0.15 + ], + "angle": 0, + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.158, + 0.335, + 0.174 + ], + "angle": 0, + "content": "Teng Shi" + }, + { + "type": "text", + "bbox": [ + 0.204, + 0.176, + 0.394, + 0.19 + ], + "angle": 0, + "content": "Renmin University of China" + }, + { + "type": "text", + "bbox": [ + 0.25, + 0.191, + 0.347, + 0.205 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.231, + 0.206, + 0.366, + 0.22 + ], + "angle": 0, + "content": "shiteng@ruc.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.24, + 0.249, + 0.356, + 0.266 + ], + "angle": 0, + "content": "Xiaoxue Zang" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.267, + 0.341, + 0.284 + ], + "angle": 0, + "content": "Kai Zheng" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.284, + 0.402, + 0.298 + ], + "angle": 0, + "content": "Kuaishou Technology Co., Ltd." + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.299, + 0.346, + 0.313 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.314, + 0.359, + 0.327 + ], + "angle": 0, + "content": "xxic666@126.com" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.329, + 0.373, + 0.344 + ], + "angle": 0, + "content": "zhengk92@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.353, + 0.158, + 0.367 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.371, + 0.483, + 0.718 + ], + "angle": 0, + "content": "Recently, the personalization of Large Language Models (LLMs) to generate content that aligns with individual user preferences has garnered widespread attention. Personalized Retrieval-Augmented Generation (RAG), which retrieves relevant documents from the user's history to reflect their preferences and enhance LLM generation, is one commonly used approach for personalization. However, existing personalized RAG methods do not consider that the histories of similar users can also assist in personalized generation for the current user, meaning that collaborative information between users can also benefit personalized generation. Inspired by the application of collaborative filtering in recommender systems, we propose a method called CFRAG, which adapts Collaborative Filtering to RAG for personalized text generation. 
However, this presents two challenges: (1) how to incorporate collaborative information without explicit user similarity labels? (2) how to retrieve documents that support personalized LLM generation? For Challenge 1, we use contrastive learning to train user embeddings to retrieve similar users and introduce collaborative information. For Challenge 2, we design a personalized retriever and reranker to retrieve the top-\\(k\\) documents from these users' histories. We take into account the user's preference during retrieval and reranking. Then we leverage feedback from the LLM to fine-tune the personalized retriever and reranker, enabling them to retrieve documents that meet the personalized generation needs of the LLM. Experimental results on the Language Model Personalization (LaMP) benchmark" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.732, + 0.483, + 0.765 + ], + "angle": 0, + "content": "*Corresponding authors. Work partially done at Engineering Research Center of Next-Generation Intelligent Search and Recommendation, Ministry of Education. \nWork done when Teng Shi was the intern at Kuaishou." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.78, + 0.483, + 0.853 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.854, + 0.192, + 0.864 + ], + "angle": 0, + "content": "SIGIR '25, Padua, Italy." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.865, + 0.473, + 0.885 + ], + "angle": 0, + "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM. ACM ISBN 979-8-4007-1592-1/25/07" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.885, + 0.29, + 0.896 + ], + "angle": 0, + "content": "https://doi.org/10.1145/XXXXXX.XXXXXXX" + }, + { + "type": "text", + "bbox": [ + 0.667, + 0.158, + 0.732, + 0.174 + ], + "angle": 0, + "content": "Jun Xu*" + }, + { + "type": "text", + "bbox": [ + 0.653, + 0.176, + 0.749, + 0.193 + ], + "angle": 0, + "content": "Xiao Zhang" + }, + { + "type": "text", + "bbox": [ + 0.607, + 0.193, + 0.796, + 0.207 + ], + "angle": 0, + "content": "Renmin University of China" + }, + { + "type": "text", + "bbox": [ + 0.653, + 0.208, + 0.75, + 0.222 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.6, + 0.223, + 0.801, + 0.237 + ], + "angle": 0, + "content": "{junxu,zhangx89}@ruc.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.657, + 0.249, + 0.744, + 0.266 + ], + "angle": 0, + "content": "Yang Song" + }, + { + "type": "text", + "bbox": [ + 0.671, + 0.268, + 0.729, + 0.281 + ], + "angle": 0, + "content": "Han Li" + }, + { + "type": "text", + "bbox": [ + 0.598, + 0.284, + 0.804, + 0.298 + ], + "angle": 0, + "content": "Kuaishou Technology Co., Ltd." 
+ }, + { + "type": "text", + "bbox": [ + 0.652, + 0.299, + 0.749, + 0.313 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.652, + 0.314, + 0.749, + 0.328 + ], + "angle": 0, + "content": "ys@sonyis.me" + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.329, + 0.78, + 0.342 + ], + "angle": 0, + "content": "lihan08@kuaishou.com" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.354, + 0.914, + 0.383 + ], + "angle": 0, + "content": "validate the effectiveness of CFRAG. Further analysis confirms the importance of incorporating collaborative information." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.395, + 0.634, + 0.411 + ], + "angle": 0, + "content": "CCS Concepts" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.414, + 0.928, + 0.442 + ], + "angle": 0, + "content": "- Information systems \\(\\rightarrow\\) Personalization; - Computing methodologies \\(\\rightarrow\\) Natural language generation." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.455, + 0.6, + 0.47 + ], + "angle": 0, + "content": "Keywords" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.473, + 0.915, + 0.5 + ], + "angle": 0, + "content": "Large language model; Personalization; Retrieval augmented generation" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.509, + 0.662, + 0.52 + ], + "angle": 0, + "content": "ACM Reference Format:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.522, + 0.915, + 0.597 + ], + "angle": 0, + "content": "Teng Shi, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Yang Song, and Han Li. 2025. Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation. In Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '25), July 13-18, 2025, Padua, Italy. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/XXXXXX.XXXXXXX" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.615, + 0.651, + 0.629 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.633, + 0.915, + 0.716 + ], + "angle": 0, + "content": "Personalizing Large Language Models (LLMs) [55] to generate personalized outputs tailored to individual user preferences has emerged as a significant and rapidly growing field [16, 23, 29, 31, 32, 36, 37, 57]. Personalized Retrieval-Augmented Generation (RAG) [8] has become a commonly used approach for personalizing LLMs [29, 31, 32, 57]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.717, + 0.915, + 0.897 + ], + "angle": 0, + "content": "The process of existing personalized RAG methods typically involves retrieving similar documents from the user's historical behaviors based on the user's input query, then concatenating these documents with the query as a prompt input to the LLM for generation. Although effective, this approach is limited to retrieving only the current user's history, neglecting collaborative information. Users with similar histories tend to be more alike, and the information from these similar users can also aid in personalizing generation for the current user. As shown in the example in Figure 1, the upper part illustrates the results of the existing RAG method, which retrieves documents from the current user's history. We can only infer from these results that \"She\" in the user's input refers to \"Hillary Clinton\". 
In contrast, the lower part demonstrates" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.28, + 0.087 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + }, + { + "type": "header", + "bbox": [ + 0.843, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Teng Shi et al." + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.104, + 0.885, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.352, + 0.915, + 0.422 + ], + "angle": 0, + "content": "Figure 1: An example from the LaMP-4 dataset [32]. The task of LaMP-4 is to generate personalized news headlines based on user input. This example illustrates the benefit of collaborative information for LLM personalization: (a) The top shows results retrieved by the existing RAG method from the current user's history, where we can only infer that \"She\" in the user's input refers to \"Hillary Clinton\". (b) The bottom shows results retrieved by our method from similar users' histories, allowing us to infer further that \"his\" in the user's input refers to \"Donald Trump\" thus enabling the generation of a more accurate result." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.432, + 0.483, + 0.515 + ], + "angle": 0, + "content": "our method, which retrieves documents from the history of similar users. In this case, we can further infer that \"his\" in the user's input refers to \"Donald Trump\", leading to a better generation result. From this example, we can see that incorporating collaborative information allows the retrieval of more diverse documents, helping the LLM generate results that better meet the user's needs." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.515, + 0.483, + 0.709 + ], + "angle": 0, + "content": "Inspired by the application of collaborative filtering in recommender systems [11, 40, 46], we propose to adapt collaborative information into RAG to personalize LLMs. However, adapting collaborative filtering to personalized RAG presents two challenges. Challenge 1: How to incorporate collaborative information. Without explicit labels indicating which users are similar, which users' information should be selected to help personalize generation for the current user? Challenge 2: How to retrieve documents that support personalized LLM generation, rather than relying on traditional semantic relevance? Pre-trained dense retrieval models [54] only retrieve based on the semantic relevance between the query and document. Directly using these models for retrieval may not necessarily result in content that allows the LLM to generate outputs that meet the user's needs [25, 35]." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.709, + 0.483, + 0.89 + ], + "angle": 0, + "content": "To address the above challenges, this paper proposes a method named CFRAG which adapts Collaborative Filtering to personalized Retrieval Augmented Generation. Firstly, to address Challenge 1, since there are no explicit user similarity labels, we use contrastive learning [15, 44] to train user embeddings for retrieving similar users to introduce collaborative information. Specifically, we apply different data augmentation methods to the user's history to obtain different views, and then treat different views of the same user's history as positive samples for each other. Then we use contrastive learning on different views to train the user embeddings. 
Secondly, for Challenge 2, we designed a personalized retriever and reranker to retrieve the top-\\(k\\) documents from the histories of the retrieved users. In both retrieval and reranking, in addition to the semantic" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.432, + 0.915, + 0.543 + ], + "angle": 0, + "content": "relevance between the query and documents, we also considered the user's preferences for different documents to enable personalized retrieval. Additionally, we further fine-tune the retriever and reranker based on the feedback from the LLM to ensure that the retrieved documents better support the personalized LLM generation. Finally, the top-\\(k\\) documents are concatenated with the user's input query to form a prompt, which is then fed into the LLM for personalized generation." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.543, + 0.915, + 0.613 + ], + "angle": 0, + "content": "The major contributions of the paper are summarized as follows: We analyzed the necessity of introducing collaborative filtering into RAG for LLM personalization and identified the challenges: how to introduce collaborative information and how to retrieve documents that support personalized LLM generation." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.612, + 0.915, + 0.681 + ], + "angle": 0, + "content": "- We proposed a method called CFRAG, which uses contrastive learning to train user embeddings for retrieving similar users and incorporating collaborative information. It leverages LLM feedback to train the personalized retriever and reranker, enabling them to retrieve documents that support personalized LLM generation." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.681, + 0.915, + 0.737 + ], + "angle": 0, + "content": "- Experimental results on the Language Model Personalization (LaMP) [32] benchmark validate the effectiveness of CFRAG. The experimental analysis also demonstrates the importance of leveraging collaborative information." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.612, + 0.915, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.763, + 0.661, + 0.777 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.786, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Personalization of LLMs. Large Language Models (LLMs) [55] have demonstrated remarkable capabilities in various fields, such as text generation [22], information retrieval [56], recommender systems [5, 41], and so on. However, since LLMs are typically designed to serve all tasks with a single model and are trained on broad, domain-agnostic data, they face challenges in adapting to the personalized needs of individual users [4, 32]. Therefore, LLM personalization has attracted widespread attention [16, 31, 57]." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.533, + 0.088 + ], + "angle": 0, + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + }, + { + "type": "header", + "bbox": [ + 0.719, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + }, + { + "type": "image", + "bbox": [ + 0.11, + 0.106, + 0.881, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.081, + 0.339, + 0.916, + 0.395 + ], + "angle": 0, + "content": "Figure 2: The architecture of CFRAG. 
From left to right: (a) User Retrieval retrieves similar users (Section 4.1); (b) Retriever retrieves the top- \\(k\\) documents from each user's history (Section 4.2); (c) Reranker reranks the \\(m \\times k\\) documents to get the final top- \\(k\\) documents, which are then concatenated with the query and input into the LLM for personalized text generation (Section 4.3)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.405, + 0.484, + 0.599 + ], + "angle": 0, + "content": "Existing works on LLM personalization mainly include the following types of methods: (1) Fine-tuning a personalized LLM for each user [36, 37, 42]; Tan et al. [37] fine-tuned the LLM using LoRA [12] to get personalized LoRA parameters for each user. (2) Aligning LLMs with user-specific preferences through Reinforcement Learning from Human Feedback (RLHF) [16, 23, 43]; Jang et al. [16] first trained different parameters for various objectives using RLHF, then merged these parameters based on users' personalized needs. (3) Incorporating user-specific context into the prompt [21, 27, 29, 31, 32, 57]. Richardson et al. [29] used instruction-tuned LLMs to summarize user history and then incorporated it into prompts for generation. Salemi et al. [31, 32] used RAG to retrieve relevant documents from user history based on the input query and incorporated them into the prompt." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.599, + 0.484, + 0.765 + ], + "angle": 0, + "content": "This paper further introduces collaborative filtering for personalization based on the RAG framework. Collaborative filtering has already been applied in fields such as recommender systems [33, 34, 38, 48-52] and has been proven effective. It assumes that users who have interacted with similar items share similar preferences, and recommending items from similar users to the current user can meet their needs. Some works [11, 46] learn the collaborative information between users and items through matrix factorization [19], while others [10, 40] further explore higher-order collaborative information between users and items using graph neural networks. The application of collaborative filtering in LLM personalization remains under-explored." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.768, + 0.484, + 0.894 + ], + "angle": 0, + "content": "Retrieval Augmented Generation. Retrieval Augmented Generation [7, 8] introduces external knowledge through document retrieval, alleviating issues such as LLM hallucinations [53], and enhancing LLMs' capabilities in knowledge-intensive tasks [17] such as open-domain question answering [14, 20]. Some works [3, 13] encode retrieved documents using separate encoders, and then fuse the results with the language model using cross-attention. A more common approach is to directly include the retrieved documents in the prompt of the LLM [2, 9, 20, 25, 35]. In recent years, this" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.405, + 0.918, + 0.462 + ], + "angle": 0, + "content": "in-context RAG framework has also been applied to LLM personalization, which is personalized by retrieving documents from the user's history [31, 32, 57]. This paper introduces collaborative filtering by retrieving similar users' histories for better personalization." 
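The in-context personalized RAG framework described above comes down to two steps: retrieve documents from the user's own history, then place them in the prompt ahead of the query. Below is a minimal Python sketch of that loop, assuming illustrative placeholder callables retrieve_top_k (e.g., BM25 or a dense retriever over the history) and llm_generate (any LLM client); these names are hypothetical, not from the paper's released code:

def personalized_rag(query: str, history: list[str], retrieve_top_k, llm_generate, k: int = 5) -> str:
    """Retrieve k documents from the user's own history and prepend them to the LLM prompt."""
    docs = retrieve_top_k(query, history, k)  # score each history document against the query
    context = "\n".join(f"- {d}" for d in docs)
    prompt = (f"Documents from the user's history:\n{context}\n\n"
              f"Using these as evidence of the user's preferences, respond to: {query}")
    return llm_generate(prompt)

CFRAG extends exactly this loop by first retrieving similar users and pooling their histories before document retrieval.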
+ },
+ { + "type": "title", + "bbox": [ + 0.515, + 0.472, + 0.723, + 0.487 + ], + "angle": 0, + "content": "3 Problem Formulation" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.491, + 0.916, + 0.606 + ], + "angle": 0, + "content": "Let \\(\\mathcal{U} = \\{u_1, u_2, \\ldots, u_M\\}\\) denote the set of all users, where \\(M\\) is the number of users. Each user \\(u \\in \\mathcal{U}\\) has a chronologically ordered history \\(\\mathcal{H}_u = [d_1, d_2, \\ldots, d_N]\\), which includes all her historical documents, where \\(N\\) is the number of documents in the history. The personalized text generation dataset is \\(\\mathcal{D} = \\{(u, q, y)_i\\}_{i=1}^{|\\mathcal{D}|}\\). For each instance, \\(q\\) is the query input by the user \\(u\\) to the LLM, and \\(y\\) is the target output. Our goal is first to introduce collaborative information by retrieving the top-\\(m\\) most similar users for user \\(u\\):" + },
+ { + "type": "equation", + "bbox": [ + 0.625, + 0.611, + 0.804, + 0.626 + ], + "angle": 0, + "content": "\\[\n\\mathcal{U}_{\\text{retrieved}} = \\{u_{1}, u_{2}, \\dots, u_{m}\\}.\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.632, + 0.914, + 0.659 + ], + "angle": 0, + "content": "Then, we use a retriever to retrieve the top-\\(k\\) documents from each of the \\(m\\) users' histories, resulting in a total of \\(m \\times k\\) documents:" + },
+ { + "type": "equation", + "bbox": [ + 0.57, + 0.666, + 0.858, + 0.681 + ], + "angle": 0, + "content": "\\[\n\\mathcal{D}_{\\text{retrieved}} = \\{d_{i,j} \\mid i \\in \\{1, \\dots, m\\}, j \\in \\{1, \\dots, k\\}\\}.\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.686, + 0.914, + 0.714 + ], + "angle": 0, + "content": "Finally, we use a reranker to rerank these \\(m \\times k\\) documents and obtain the final top-\\(k\\) documents:" + },
+ { + "type": "equation", + "bbox": [ + 0.619, + 0.72, + 0.81, + 0.735 + ], + "angle": 0, + "content": "\\[\n\\mathcal{D}_{\\text{reranked}} = \\{d_{i} \\mid i \\in \\{1, \\dots, k\\}\\}.\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.74, + 0.913, + 0.782 + ], + "angle": 0, + "content": "These top-\\(k\\) documents will be concatenated with the user's query \\(q\\) as a prompt and input into the LLM, enabling it to generate a response that aligns with the target output \\(y\\)." + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.782, + 0.914, + 0.839 + ], + "angle": 0, + "content": "This paper primarily focuses on how to retrieve \\(\\mathcal{U}_{\\mathrm{retrieved}}\\) to introduce collaborative information, and how to train the retriever and reranker so that they can effectively retrieve documents that support personalized LLM generation." + },
+ { + "type": "title", + "bbox": [ + 0.515, + 0.85, + 0.664, + 0.866 + ], + "angle": 0, + "content": "4 Our Approach" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.868, + 0.916, + 0.897 + ], + "angle": 0, + "content": "This section introduces our method CFRAG. CFRAG's overall architecture is shown in Figure 2. As mentioned in Section 1, to address" + } + ], + [
+ { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.28, + 0.087 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + },
+ { + "type": "header", + "bbox": [ + 0.843, + 0.076, + 0.913, + 0.087 + ], + "angle": 0, + "content": "Teng Shi et al."
+ },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.484, + 0.246 + ], + "angle": 0, + "content": "Challenge 1, i.e., how to introduce collaborative information, we first train user embeddings using contrastive learning to retrieve the top-\\(m\\) most similar users (see Section 4.1). For Challenge 2, which involves retrieving documents that support personalized LLM generation, we fine-tune the personalized retriever and reranker using LLM feedback. The retriever first retrieves the top-\\(k\\) documents from the history of each of the \\(m\\) users, resulting in \\(m \\times k\\) documents (see Section 4.2). The reranker then reranks these documents to obtain the final top-\\(k\\) documents as input for the LLM (see Section 4.3)." + },
+ { + "type": "title", + "bbox": [ + 0.084, + 0.259, + 0.245, + 0.273 + ], + "angle": 0, + "content": "4.1 User Retrieval" + },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.278, + 0.484, + 0.418 + ], + "angle": 0, + "content": "First, we perform user retrieval to get the top-\\(m\\) most similar users for user \\(u\\) to introduce collaborative information. However, we do not have labels indicating which users are similar to each other. To address this, we employ a contrastive learning [15, 44] approach. We apply different data augmentation methods to the user history \\(\\mathcal{H}_u\\) to obtain different views of the user's history. We treat different views of the same user as positive samples and the histories of other users as negative samples, and then we use the InfoNCE [28] loss to train user embeddings for retrieval. Figure 3 illustrates the process of training user embeddings using contrastive learning." + },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.425, + 0.484, + 0.567 + ], + "angle": 0, + "content": "4.1.1 User Encoder. Specifically, we first use an embedding model (such as BERT [6], RoBERTa [26], BGE [45], etc.) \\(\\mathbf{Emb}(\\cdot)\\) to encode each document in the user's history \\(\\mathcal{H}_u\\) to obtain \\(\\mathbf{E}_u = [\\mathbf{e}_1,\\mathbf{e}_2,\\dots ,\\mathbf{e}_N]^{\\intercal}\\in \\mathbb{R}^{N\\times d}\\), where \\(\\mathbf{e}_i = \\mathbf{Emb}(d_i)\\) and \\(d\\) is the embedding dimension. To model the sequential relationships between different documents in the user's history, we introduce positional embedding \\(\\mathbf{P}\\in \\mathbb{R}^{N\\times d}\\). Afterward, the history \\(\\mathcal{H}_u\\)'s embedding becomes \\(\\widehat{\\mathbf{E}}_u = \\mathbf{E}_u + \\mathbf{P}\\). Then, we apply a transformer [39] as the user encoder to encode the user's history \\(\\widehat{\\mathbf{E}}_u\\) and average the transformer's output to obtain the user's embedding:" + },
+ { + "type": "equation", + "bbox": [ + 0.146, + 0.575, + 0.482, + 0.593 + ], + "angle": 0, + "content": "\\[\n\\mathbf{e}_{u} = \\operatorname{Encoder}_{u}(u) = \\operatorname{MEAN}(\\operatorname{Trm}(\\widehat{\\mathbf{E}}_{u})) \\in \\mathbb{R}^{d}, \\tag{1}\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.599, + 0.482, + 0.642 + ], + "angle": 0, + "content": "where \\(\\mathrm{Encoder}_u(\\cdot)\\to \\mathbb{R}^d\\) denotes the user encoder, and \\(\\mathrm{Trm}(\\cdot)\\) denotes a transformer encoder. Next, we train the transformer encoder using contrastive learning." + },
+ { + "type": "text", + "bbox": [ + 0.084, + 0.65, + 0.482, + 0.678 + ], + "angle": 0, + "content": "4.1.2 Data Augmentation. 
We generate different views of \\(\\mathcal{H}_u\\) using the following three data augmentation methods:" + },
+ { + "type": "text", + "bbox": [ + 0.084, + 0.678, + 0.485, + 0.72 + ], + "angle": 0, + "content": "Document Crop. We randomly select a continuous sub-sequence of length \\(L_{c} = \\lfloor \\eta_{c}N\\rfloor\\) from \\(\\mathcal{H}_u\\), where \\(\\eta_c\\) is a hyper-parameter controlling the crop ratio. The history after cropping is as follows:" + },
+ { + "type": "equation", + "bbox": [ + 0.185, + 0.726, + 0.38, + 0.745 + ], + "angle": 0, + "content": "\\[\n\\mathcal{H}_{u}^{\\text{crop}} = [d_{c}, d_{c+1}, \\dots, d_{c+L_{c}-1}].\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.751, + 0.484, + 0.835 + ], + "angle": 0, + "content": "Document Mask. For the history \\(\\mathcal{H}_u\\), we randomly mask out \\(L_{m} = \\lfloor \\eta_{m}N\\rfloor\\) documents \\(\\mathcal{I}_{\\mathrm{mask}} = \\{i_1,i_2,\\dots ,i_{L_m}\\}\\), where \\(\\mathcal{I}_{\\mathrm{mask}}\\) is the set of indices corresponding to the masked documents and \\(\\eta_{m}\\) is a hyper-parameter that controls the mask ratio. The masked documents are replaced with a special token [mask]. The history after masking is as follows:" + },
+ { + "type": "equation", + "bbox": [ + 0.184, + 0.841, + 0.377, + 0.899 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal{H}_{u}^{\\text{mask}} = [\\hat{d}_{1}, \\hat{d}_{2}, \\dots, \\hat{d}_{N}], \\\\ \\hat{d}_{i} = \\begin{cases} d_{i}, & i \\notin \\mathcal{I}_{\\text{mask}}, \\\\ {[\\text{mask}]}, & i \\in \\mathcal{I}_{\\text{mask}}. \\end{cases} \\end{array}\n\\]" + },
+ { + "type": "image", + "bbox": [ + 0.524, + 0.105, + 0.905, + 0.245 + ], + "angle": 0, + "content": null + },
+ { + "type": "image_caption", + "bbox": [ + 0.515, + 0.249, + 0.913, + 0.264 + ], + "angle": 0, + "content": "Figure 3: Contrastive learning for user embedding training." + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.278, + 0.915, + 0.36 + ], + "angle": 0, + "content": "Document Reorder. We randomly select a sub-sequence \\([d_r, d_{r+1}, \\ldots, d_{r+L_r-1}]\\) of length \\(L_r = \\lfloor \\eta_r N \\rfloor\\) from \\(\\mathcal{H}_u\\), where \\(\\eta_r\\) is a hyper-parameter controlling the reorder ratio, and then randomly shuffle the order of the documents within the sub-sequence to obtain \\([\\hat{d}_r, \\hat{d}_{r+1}, \\ldots, \\hat{d}_{r+L_r-1}]\\). The history after reordering is as follows:" + },
+ { + "type": "equation", + "bbox": [ + 0.571, + 0.366, + 0.856, + 0.384 + ], + "angle": 0, + "content": "\\[\n\\mathcal{H}_{u}^{\\text{reorder}} = [d_{1}, d_{2}, \\dots, \\hat{d}_{r}, \\dots, \\hat{d}_{r+L_{r}-1}, \\dots, d_{N}].\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.389, + 0.915, + 0.528 + ], + "angle": 0, + "content": "4.1.3 Contrastive Loss. Each time, we randomly select two data augmentation methods \\(\\mathcal{A}'\\) and \\(\\mathcal{A}''\\) to generate two different views of \\(\\mathcal{H}_u\\), denoted as \\(\\mathcal{H}_u'\\) and \\(\\mathcal{H}_u''\\). Then, using the encoder described in Section 4.1.1, we obtain the user embeddings \\(\\mathbf{e}_u'\\) and \\(\\mathbf{e}_u''\\) corresponding to the different views. 
Since \\(\\mathbf{e}_u'\\) and \\(\\mathbf{e}_u''\\) are obtained through data augmentation of \\(\\mathcal{H}_u\\), they are more similar to each other. Therefore, we treat them as positive samples for each other and use the views generated from the augmented histories of other users in the same batch as negative samples. We then perform contrastive learning using the InfoNCE [28] loss as follows:" + },
+ { + "type": "equation", + "bbox": [ + 0.565, + 0.534, + 0.913, + 0.609 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\mathrm{CL}} = -\\left[ \\log \\frac{\\exp(\\cos(\\mathbf{e}_{u}', \\mathbf{e}_{u}'') / \\tau_{1})}{\\sum_{u^{-} \\in \\mathcal{U}_{\\mathrm{neg}}} \\exp(\\cos(\\mathbf{e}_{u}', \\mathbf{e}_{u^{-}}'') / \\tau_{1})} + \\log \\frac{\\exp(\\cos(\\mathbf{e}_{u}', \\mathbf{e}_{u}'') / \\tau_{1})}{\\sum_{u^{-} \\in \\mathcal{U}_{\\mathrm{neg}}} \\exp(\\cos(\\mathbf{e}_{u^{-}}', \\mathbf{e}_{u}'') / \\tau_{1})} \\right], \\tag{2}\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.614, + 0.916, + 0.656 + ], + "angle": 0, + "content": "where \\(\\tau_{1}\\) is the temperature coefficient, \\(\\mathcal{U}_{\\mathrm{neg}}\\) is the set of randomly sampled in-batch negative samples, and \\(\\cos(\\cdot)\\) denotes the cosine similarity." + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.663, + 0.916, + 0.746 + ], + "angle": 0, + "content": "4.1.4 Top-\\(m\\) User Retrieval. After training with contrastive learning, we can use the encoder from Section 4.1.1 to obtain the user embedding \\(\\mathbf{e}_u\\). We then calculate the cosine similarity between each pair of user embeddings and retrieve the top-\\(m\\) most similar users \\(\\mathcal{U}_{\\mathrm{retrieved}} = \\{u_1, u_2, \\dots, u_m\\}\\) for user \\(u\\). Subsequently, the histories of these \\(m\\) users will be used for further document retrieval." + },
+ { + "type": "title", + "bbox": [ + 0.515, + 0.759, + 0.724, + 0.773 + ], + "angle": 0, + "content": "4.2 Document Retrieval" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.777, + 0.916, + 0.862 + ], + "angle": 0, + "content": "After retrieving the top-\\(m\\) users, we design a personalized retriever to retrieve the top-\\(k\\) documents from each user's history, resulting in a total of \\(m \\times k\\) candidate documents \\(\\mathcal{D}_{\\text{retrieved}} = \\{d_{i,j} \\mid i \\in \\{1, \\ldots, m\\}, j \\in \\{1, \\ldots, k\\}\\}\\). This section introduces how the retriever is designed and how it is trained to retrieve documents that better align with the requirements of personalized LLM generation." + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.868, + 0.915, + 0.897 + ], + "angle": 0, + "content": "4.2.1 Retriever. First, we use a pre-trained dense retrieval model (such as the BGE retriever [45]) to compute the semantic relevance" + } + ], + [
+ { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.533, + 0.088 + ], + "angle": 0, + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + },
+ { + "type": "header", + "bbox": [ + 0.719, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy."
+ },
+ { + "type": "image", + "bbox": [ + 0.091, + 0.105, + 0.475, + 0.232 + ], + "angle": 0, + "content": null + },
+ { + "type": "image_caption", + "bbox": [ + 0.083, + 0.234, + 0.483, + 0.263 + ], + "angle": 0, + "content": "Figure 4: The method of training the retriever and reranker using LLM feedback." + },
+ { + "type": "text", + "bbox": [ + 0.083, + 0.281, + 0.384, + 0.295 + ], + "angle": 0, + "content": "between the query and the candidate documents:" + },
+ { + "type": "equation", + "bbox": [ + 0.15, + 0.303, + 0.483, + 0.323 + ], + "angle": 0, + "content": "\\[\nS_{q,d}^{\\text{retriever}} = \\cos\\left(\\operatorname{Encoder}_{q}(q), \\operatorname{Encoder}_{d}(d)\\right), \\tag{3}\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.331, + 0.485, + 0.463 + ], + "angle": 0, + "content": "where \\(\\mathrm{Encoder}_q(\\cdot)\\to \\mathbb{R}^d\\) and \\(\\mathrm{Encoder}_d(\\cdot)\\to \\mathbb{R}^d\\) are the encoders for the query and the document in the retrieval model, respectively. Pre-trained retrieval models typically use \\(S_{q,d}^{\\mathrm{retriever}}\\) directly for retrieval. However, \\(S_{q,d}^{\\mathrm{retriever}}\\) only considers the semantic relevance between the query and the document. Since different users might input the same query but expect different outputs due to their varying preferences, we further account for user personalization by calculating the preference score of the user for the document as follows:" + },
+ { + "type": "equation", + "bbox": [ + 0.158, + 0.471, + 0.483, + 0.49 + ], + "angle": 0, + "content": "\\[\nS_{u,d}^{\\text{retriever}} = \\cos\\left(\\mathrm{MLP}_{1}(\\mathbf{e}_{u}), \\operatorname{Encoder}_{d}(d)\\right), \\tag{4}\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.498, + 0.483, + 0.556 + ], + "angle": 0, + "content": "where \\(\\mathrm{MLP}_1: \\mathbb{R}^d \\to \\mathbb{R}^d\\) is a multi-layer perceptron that maps the user embedding to the space where the cosine similarity is computed. \\(\\mathbf{e}_u\\) is the embedding obtained in Section 4.1.1. The total score for retrieval is computed as follows:" + },
+ { + "type": "equation", + "bbox": [ + 0.161, + 0.562, + 0.483, + 0.583 + ], + "angle": 0, + "content": "\\[\nS_{u,q,d}^{\\text{retriever}} = (1 - \\alpha) S_{q,d}^{\\text{retriever}} + \\alpha S_{u,d}^{\\text{retriever}}, \\tag{5}\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.589, + 0.485, + 0.618 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is a hyper-parameter that controls the weight of personalization." + },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.626, + 0.483, + 0.736 + ], + "angle": 0, + "content": "4.2.2 Training. Since the pre-trained dense retrieval model is not fine-tuned for our specific task, the retrieved results may not necessarily lead to LLM responses that better match the target output \\(y\\) [25, 35]. However, there is no ground truth indicating which documents are better. Therefore, we evaluate the difference between the LLM's output and the target output \\(y\\), using this as a label to train the retrieval model. Figure 4 shows the process of training the retriever using LLM feedback."
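Eqs. (3)-(5) define the retrieval score as a convex combination of query-document semantic relevance and the user's preference for the document. A minimal PyTorch sketch follows, under stated assumptions: q_vec, d_vecs, and e_u are precomputed embeddings, and mlp_1 stands in for \(\mathrm{MLP}_1\); this is an illustration, not the released implementation:

import torch
import torch.nn.functional as F

def retriever_score(q_vec: torch.Tensor, d_vecs: torch.Tensor, e_u: torch.Tensor,
                    mlp_1: torch.nn.Module, alpha: float = 0.5) -> torch.Tensor:
    """q_vec: (d,) query embedding; d_vecs: (n, d) candidate document embeddings;
    e_u: (d,) user embedding from the contrastively trained user encoder."""
    s_qd = F.cosine_similarity(q_vec.unsqueeze(0), d_vecs, dim=-1)       # Eq. (3)
    s_ud = F.cosine_similarity(mlp_1(e_u).unsqueeze(0), d_vecs, dim=-1)  # Eq. (4)
    return (1.0 - alpha) * s_qd + alpha * s_ud                           # Eq. (5)

Ranking each user's history by this score and keeping the top-k per user yields \(\mathcal{D}_{\text{retrieved}}\).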
+ },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.737, + 0.483, + 0.822 + ], + "angle": 0, + "content": "Specifically, we first use the pre-trained retrieval model to retrieve the top-\\(k\\) documents from each of the \\(m\\) users' histories based on \\(S_{q,d}^{\\mathrm{retriever}}\\) in Eq. (3), resulting in a total of \\(m \\times k\\) candidate documents. These documents are then concatenated with the query one by one and used as prompts for the LLM, producing \\(m \\times k\\) outputs:" + },
+ { + "type": "equation", + "bbox": [ + 0.124, + 0.831, + 0.443, + 0.849 + ], + "angle": 0, + "content": "\\[\n\\{O_{q,d_{i,j}} = \\mathrm{LLM}(q, d_{i,j}) \\mid i \\in \\{1, \\dots, m\\}, j \\in \\{1, \\dots, k\\}\\},\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.855, + 0.484, + 0.899 + ], + "angle": 0, + "content": "where \\(\\mathrm{LLM}(q, d_{i,j})\\) represents the output generated by inputting the concatenated query \\(q\\) and document \\(d_{i,j}\\) into the LLM. Then, based on the quality of these outputs, we can calculate the distribution of" + },
+ { + "type": "text", + "bbox": [ + 0.515, + 0.107, + 0.75, + 0.121 + ], + "angle": 0, + "content": "these candidate documents as follows:" + },
+ { + "type": "equation", + "bbox": [ + 0.564, + 0.125, + 0.913, + 0.163 + ], + "angle": 0, + "content": "\\[\np_{\\text{LLM}}(d_{i,j} \\mid q, y) = \\frac{\\exp(\\mathrm{eval}(y, O_{q,d_{i,j}}))}{\\sum_{i=1}^{m} \\sum_{j=1}^{k} \\exp(\\mathrm{eval}(y, O_{q,d_{i,j}}))}, \\tag{6}\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.168, + 0.915, + 0.254 + ], + "angle": 0, + "content": "where \\(\\mathrm{eval}(\\cdot)\\) measures the difference between the target output \\(y\\) and the LLM's output, using metrics such as the ROUGE [24] score. A larger value returned by \\(\\mathrm{eval}(\\cdot)\\) indicates a better-generated result. Similarly, we can also calculate the score distribution of the candidate documents by the retrieval model based on \\(S_{u,q,d}^{\\text{retriever}}\\) in Eq. (5):" + },
+ { + "type": "equation", + "bbox": [ + 0.571, + 0.26, + 0.913, + 0.305 + ], + "angle": 0, + "content": "\\[\np_{\\text{retriever}}(d_{i,j} \\mid q, u) = \\frac{\\exp(S_{u,q,d_{i,j}}^{\\text{retriever}})}{\\sum_{i=1}^{m} \\sum_{j=1}^{k} \\exp(S_{u,q,d_{i,j}}^{\\text{retriever}})}. \\tag{7}\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.309, + 0.915, + 0.379 + ], + "angle": 0, + "content": "We aim for the retrieval model to retrieve documents that lead to better LLM-generated results, which means making the distribution \\(p_{\\mathrm{retriever}}(d|q,u)\\) in Eq. (7) closer to the distribution \\(p_{\\mathrm{LLM}}(d|q,y)\\) in Eq. (6). Therefore, we compute the KL divergence between the two distributions as the loss to optimize the retriever:" + },
+ { + "type": "equation", + "bbox": [ + 0.568, + 0.386, + 0.913, + 0.402 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{retriever}} = \\mathrm{KL}\\left(p_{\\text{retriever}}(d \\mid q, u) \\,\\|\\, p_{\\text{LLM}}(d \\mid q, y)\\right). \\tag{8}\n\\]" + },
+ { + "type": "title", + "bbox": [ + 0.515, + 0.414, + 0.71, + 0.427 + ], + "angle": 0, + "content": "4.3 Document Rerank" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.432, + 0.914, + 0.475 + ], + "angle": 0, + "content": "After retrieving \\(\\mathcal{D}_{\\mathrm{retrieved}}\\) through the retriever, in this section, we further refine the results by reranking \\(\\mathcal{D}_{\\mathrm{retrieved}}\\) to obtain the final top-\\(k\\) ranked results \\(\\mathcal{D}_{\\mathrm{reranked}} = \\{d_i \\mid i \\in \\{1, \\dots, k\\}\\}\\)." + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.482, + 0.913, + 0.536 + ], + "angle": 0, + "content": "4.3.1 Reranker. We use a pre-trained cross-encoder (such as the BGE reranker [45]) to encode the query and document, obtaining the hidden state corresponding to the [CLS] token from the last layer:" + },
+ { + "type": "equation", + "bbox": [ + 0.631, + 0.54, + 0.913, + 0.557 + ], + "angle": 0, + "content": "\\[\n\\mathbf{h}_{q,d} = \\operatorname{CrossEncoder}(q, d), \\tag{9}\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.513, + 0.561, + 0.915, + 0.687 + ], + "angle": 0, + "content": "where \\(\\mathbf{h}_{q,d} \\in \\mathbb{R}^d\\). Similarly, when reranking, in addition to considering the semantic relevance between query and document, we also take into account the user's personalized preferences. However, since the cross-encoder does not encode documents separately, it cannot compute the cosine similarity between users and documents as shown in Eq. (4) to express the user preference score. Therefore, we directly concatenate the user embedding to the output of the cross-encoder to account for the influence of user preferences. The overall score used for reranking is calculated as follows:" + },
+ { + "type": "equation", + "bbox": [ + 0.573, + 0.693, + 0.913, + 0.713 + ], + "angle": 0, + "content": "\\[\nS_{u,q,d}^{\\text{reranker}} = \\mathrm{MLP}_{3}\\left(\\operatorname{CONCAT}\\left(\\mathbf{h}_{q,d}, \\mathrm{MLP}_{2}(\\mathbf{e}_{u})\\right)\\right), \\tag{10}\n\\]" + },
+ { + "type": "text", + "bbox": [ + 0.513, + 0.72, + 0.913, + 0.751 + ], + "angle": 0, + "content": "where \\(\\mathrm{MLP}_2: \\mathbb{R}^d \\to \\mathbb{R}^d\\) and \\(\\mathrm{MLP}_3: \\mathbb{R}^{2d} \\to \\mathbb{R}\\) are two multi-layer perceptrons. \\(\\mathrm{CONCAT}(\\cdot)\\) denotes the concatenation operation." + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.758, + 0.913, + 0.813 + ], + "angle": 0, + "content": "4.3.2 Training. Similar to the retriever's training in Section 4.2.2, we also want the reranker to assign higher scores to the documents that lead to better LLM-generated results. Therefore, we train the reranker using a similar approach." + },
+ { + "type": "text", + "bbox": [ + 0.513, + 0.813, + 0.915, + 0.897 + ], + "angle": 0, + "content": "We use the trained retrieval model from Section 4.2.2 to retrieve the top-\\(k\\) documents from the history of each of the \\(m\\) users, resulting in a total of \\(m \\times k\\) candidate documents. These documents are concatenated with the query \\(q\\) and used as prompts for the LLM, producing \\(m \\times k\\) outputs. Similar to Eq. (6), we can obtain the distribution \\(p_{\\mathrm{LLM}}(d|q,y)\\) of these candidate documents. Based on" + } + ], + [
+ { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.28, + 0.087 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy."
+ }, + { + "type": "header", + "bbox": [ + 0.843, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Teng Shi et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.105, + 0.454, + 0.12 + ], + "angle": 0, + "content": "Table 1: Statistics of the datasets used in this paper." + }, + { + "type": "table", + "bbox": [ + 0.09, + 0.125, + 0.478, + 0.2 + ], + "angle": 0, + "content": "
DatasetLaMP-1LaMP-2LaMP-3LaMP-4LaMP-5LaMP-7
#Users6,54292920,0001,64314,68213,437
#Train6,5425,07320,00012,50014,68213,437
#Dev1,5001,4102,5001,5001,5001,498
#Test1,5001,5572,5001,8001,5001,500
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.214, + 0.482, + 0.247 + ], + "angle": 0, + "content": "\\(S_{u,q,d}^{\\mathrm{reranker}}\\) in Eq. (10), we can also get the score distribution of the candidate documents by the reranker:" + }, + { + "type": "equation", + "bbox": [ + 0.139, + 0.253, + 0.483, + 0.297 + ], + "angle": 0, + "content": "\\[\np _ {\\text {r e r a n k e r}} \\left(d _ {i, j} \\mid q, u\\right) = \\frac {\\exp \\left(S _ {u , q , d _ {i , j}} ^ {\\text {r e r a n k e r}}\\right)}{\\sum_ {i = 1} ^ {m} \\sum_ {j = 1} ^ {k} \\exp \\left(S _ {u , q , d _ {i , j}} ^ {\\text {r e r a n k e r}}\\right)}. \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.303, + 0.49, + 0.332 + ], + "angle": 0, + "content": "We compute the KL divergence between distributions \\( p_{\\mathrm{reranker}}(d|q,u) \\) and \\( p_{\\mathrm{LLM}}(d|q,y) \\) as the loss to optimize the reranker:" + }, + { + "type": "equation", + "bbox": [ + 0.137, + 0.339, + 0.483, + 0.356 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {r e r a n k e r}} = \\mathrm {K L} \\left(p _ {\\text {r e r a n k e r}} (d | q, u) \\mid p _ {\\text {L L M}} (d | q, y)\\right). \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.361, + 0.483, + 0.39 + ], + "angle": 0, + "content": "The loss allows the reranker to assign higher scores to documents that enable better personalized generation by the LLM." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.402, + 0.218, + 0.417 + ], + "angle": 0, + "content": "4.4 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.421, + 0.483, + 0.56 + ], + "angle": 0, + "content": "Computational Efficiency. CFRAG comprises three modules. The User Encoder is a lightweight, single-layer Transformer with inputs derived from a frozen BGE embedding (dimension 768), resulting in minimal parameter overhead. The retriever and reranker are comparable in size to BERT (approximately 100M parameters). Overall, the training cost is low due to the modest parameter size. During inference, user and document embeddings can be precomputed, requiring only similarity calculations for retrieval, ensuring minimal computational cost. This efficiency enables our method to generalize quickly to new datasets." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.574, + 0.22, + 0.589 + ], + "angle": 0, + "content": "5 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.591, + 0.483, + 0.618 + ], + "angle": 0, + "content": "We conducted experiments to evaluate the performance of CFRAG. The source code is available." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.633, + 0.291, + 0.649 + ], + "angle": 0, + "content": "5.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.651, + 0.483, + 0.803 + ], + "angle": 0, + "content": "5.1.1 Dataset. We conducted experiments on the Language Model Personalization (LaMP) [32] benchmark, which consists of seven personalized text generation tasks. We excluded LaMP-6 because its data is not publicly available. The remaining tasks include: LaMP-1 (Personalized Citation Identification); LaMP-2 (Personalized Movie Tagging); LaMP-3 (Personalized Product Rating); LaMP-4 (Personalized News Headline Generation); LaMP-5 (Personalized Scholarly Title Generation); LaMP-7 (Personalized Tweet Paraphrasing). We used the time-based split provided by LaMP to divide the data into training, validation, and test sets. The statistics of these datasets are shown in Table 1." 
+ },
+ { + "type": "text", + "bbox": [ + 0.082, + 0.812, + 0.483, + 0.868 + ], + "angle": 0, + "content": "5.1.2 Evaluation Metrics. Following previous works [31, 32], we evaluate Accuracy and F-1 score for LaMP-1 and LaMP-2, mean absolute error (MAE) and root mean squared error (RMSE) for LaMP-3, and ROUGE-1 and ROUGE-L [24] for LaMP-4, LaMP-5, and LaMP-7." + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.107, + 0.916, + 0.135 + ], + "angle": 0, + "content": "5.1.3 Baselines. In this work, we compare CFRAG with the following methods." + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.135, + 0.913, + 0.176 + ], + "angle": 0, + "content": "No Personalization: We directly input the user's query into the LLM without retrieving from user history, using this as the non-personalized baseline. We refer to this method as Zero Shot." + },
+ { + "type": "text", + "bbox": [ + 0.513, + 0.177, + 0.915, + 0.288 + ], + "angle": 0, + "content": "Personalized Baselines: We compared CFRAG with methods that personalize by retrieving from user history using different retrieval models, including: (1) Random selects \\(k\\) items randomly from the user's history; (2) Recency selects the most recent \\(k\\) items from the user's history; (3) BM25 [30] retrieves the top-\\(k\\) items from the user's history using BM25; (4) BGE [45] retrieves the top-\\(k\\) items from the user's history using the BGE retriever; (5) ROPG [31] optimizes the dense retrieval model based on the results generated by the LLM." + },
+ { + "type": "text", + "bbox": [ + 0.513, + 0.295, + 0.915, + 0.558 + ], + "angle": 0, + "content": "5.1.4 Implementation Details. We conducted experiments on two LLMs: Llama3-8B-Instruct [1] and Qwen2-7B-Instruct [47]. In this paper, we do not fine-tune the LLM because fine-tuning is costly and could cause the LLM to retain user information, potentially compromising user privacy. To ensure a fair comparison, we use greedy search for text generation. The dense retrieval model used in all methods is bge-base-en-v1.5² [45]. The cross-encoder used for the reranker in Section 4.3.1 is bge-reranker-base³ [45]. All hyperparameters for the baselines are searched according to the settings in the original papers. The embedding dimension \\(d\\) is set to 768. The number of retrieved documents \\(k\\) is set to 5, and the number of retrieved users \\(m\\) is tuned among \\(\\{2,3,4,5,6\\}\\). The \\(\\mathrm{Trm}(\\cdot)\\) encoder in Eq. (1) has 1 layer and 2 heads. The augmentation ratios \\(\\eta_{c}\\), \\(\\eta_{m}\\), and \\(\\eta_{r}\\) used for data augmentation in Section 4.1.2 are set to 0.7, 0.3, and 0.3, respectively. The temperature parameter \\(\\tau_{1}\\) in Eq. (2) is tuned among \\(\\{0.01, 0.1, 1\\}\\). The weight \\(\\alpha\\) in Eq. (5) is tuned among \\([0.01, 1.0]\\). The learning rate is tuned among \\(\\{1e-3, 1e-4, 1e-5\\}\\). Adam [18] is used to conduct the optimization. The data input and output formats are provided in Appendix A." + },
+ { + "type": "title", + "bbox": [ + 0.515, + 0.569, + 0.736, + 0.585 + ], + "angle": 0, + "content": "5.2 Experimental Results" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.588, + 0.913, + 0.614 + ], + "angle": 0, + "content": "Experimental results are shown in Table 2. From the results, we can find that:" + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.616, + 0.915, + 0.699 + ], + "angle": 0, + "content": "- Firstly, compared to existing methods, CFRAG achieved the best results across six datasets in the LaMP benchmark. 
This demonstrates the effectiveness of introducing collaborative information between users into RAG and using LLM feedback to tune the retriever and reranker to ensure that they can retrieve the documents that support personalized LLM generation." + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.699, + 0.915, + 0.795 + ], + "angle": 0, + "content": "- Secondly, we can observe that even randomly selecting user history outperforms the zero-shot method without any user history. This highlights the importance of incorporating user history to reflect user preferences for personalized generation. Additionally, we observe that retrieval methods perform better than simply selecting the most recent user history, underscoring the importance of retrieval." + },
+ { + "type": "text", + "bbox": [ + 0.514, + 0.796, + 0.915, + 0.865 + ], + "angle": 0, + "content": "- Thirdly, we also observe that, in most cases, RAG and ROPG methods using dense retrieval models outperform BM25. Additionally, CFRAG, which fine-tunes the retriever based on LLM feedback, achieves better results. This shows, on the one hand, that the better the retriever, the better the generation results, and on the other" + },
+ { + "type": "list", + "bbox": [ + 0.514, + 0.616, + 0.915, + 0.865 + ], + "angle": 0, + "content": null + },
+ { + "type": "page_footnote", + "bbox": [ + 0.515, + 0.873, + 0.747, + 0.884 + ], + "angle": 0, + "content": "\\(^{2}\\)https://huggingface.co/BAAI/bge-base-en-v1.5" + },
+ { + "type": "page_footnote", + "bbox": [ + 0.515, + 0.884, + 0.749, + 0.896 + ], + "angle": 0, + "content": "\\(^{3}\\)https://huggingface.co/BAAI/bge-reranker-base" + },
+ { + "type": "list", + "bbox": [ + 0.515, + 0.873, + 0.749, + 0.896 + ], + "angle": 0, + "content": null + },
+ { + "type": "footer", + "bbox": [ + 0.085, + 0.884, + 0.286, + 0.896 + ], + "angle": 0, + "content": "\\(^{1}\\)https://github.com/TengShi-RUC/CFRAG" + } + ], + [
+ { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.533, + 0.088 + ], + "angle": 0, + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + },
+ { + "type": "header", + "bbox": [ + 0.719, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + },
+ { + "type": "table_caption", + "bbox": [ + 0.082, + 0.104, + 0.916, + 0.162 + ], + "angle": 0, + "content": "Table 2: Comparison of the performance of CFRAG with other approaches on the LaMP benchmark. \\(\\uparrow\\) indicates that a higher value for the corresponding metric is better, while \\(\\downarrow\\) indicates that a lower value is better. The best and the second-best methods are highlighted in bold and underlined fonts, respectively. “*” indicates improvements over the second-best methods are statistically significant (\\(t\\)-test, \\(p\\)-value \\(< 0.05\\))." + },
+ { + "type": "table", + "bbox": [ + 0.093, + 0.17, + 0.903, + 0.352 + ], + "angle": 0, + "content": "<table><tr>
LLMsRetrieversLaMP-1LaMP-2LaMP-3LaMP-4LaMP-5LaMP-7
Accuracy ↑F1 ↑Accuracy ↑F1 ↑MAE ↓RMSE ↓ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑
Llama3Zero Shot0.49930.24970.29930.02000.50240.79040.14060.12280.44170.36500.30790.2593
Random0.57400.28700.39290.02620.41040.78330.17870.15710.45330.38750.31370.2508
Recency0.60400.30200.39930.02660.39800.74910.18560.16500.45730.39280.33250.2686
BM25 [30]0.62400.31200.42550.02840.40600.76660.18030.15910.46370.39780.34490.2780
BGE [45]0.63270.31630.45740.03050.35280.69690.18110.16110.46380.39580.33910.2742
ROPG [31]0.64400.32200.46810.03120.34560.69220.18380.16340.46380.39560.35300.2881
CFRAG0.6533*0.3267*0.5340*0.0356*0.2812*0.5997*0.1957*0.1745*0.4810*0.4153*0.3752*0.3055*
Qwen2Zero Shot0.50000.25000.29080.01940.44440.78050.12640.10810.41440.34680.39720.3229
Random0.56330.28170.32840.02190.40000.76210.15810.13770.45800.39210.42910.3564
Recency0.57730.28870.33260.02220.39120.75630.15810.13690.45620.39130.42470.3525
BM25 [30]0.59870.29930.35320.02350.42280.80270.15800.13740.46130.39500.42900.3570
BGE [45]0.60800.30400.36740.02450.36960.72110.16130.13980.45710.39100.43470.3605
ROPG [31]0.60930.30470.38300.02550.36720.73320.16170.14010.46000.39460.43450.3610
CFRAG0.61330.30670.3957*0.02640.3536*0.7071*0.16210.14120.4703*0.4029*0.4425*0.3708*
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.355, + 0.916, + 0.385 + ], + "angle": 0, + "content": "Table 3: Ablation Study of CFRAG on LaMP based on Llama3. \"MEAN\" represents using the average of user history document embeddings as the user embedding. \"w/o\" indicates the corresponding module in CFRAG is removed." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.39, + 0.905, + 0.501 + ], + "angle": 0, + "content": "
VariantsLaMP-1LaMP-2LaMP-3LaMP-4LaMP-5LaMP-7
#ModelAccuracy ↑F1 ↑Accuracy ↑F1 ↑MAE ↓RMSE ↓ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑
(0)CFRAG0.65330.32670.53400.03560.28120.59970.19570.17450.48100.41530.37520.3055
(1)w/o User Retrieval0.64000.32000.49360.03290.34440.69250.19140.16890.46420.39630.35660.2903
(2)User Retrieval (MEAN)0.64200.32100.50640.03380.34120.68670.18470.16390.47790.41130.37220.3022
(3)w/o Retriever Tuning0.64530.32270.49790.03320.28520.60700.19160.17040.47420.40480.35990.2940
(4)w/o Sretriever in Eq. (5)0.63330.31670.51130.03410.33240.68610.18950.16960.47500.40880.37320.3039
(5)w/o Reranker Tuning0.63070.31530.46950.03130.36960.73920.17660.15500.47140.40680.34320.2775
(6)w/o eu in Eq. (10)0.63130.31570.49930.03330.34200.69250.18870.16720.47720.41230.37310.3030
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.509, + 0.483, + 0.551 + ], + "angle": 0, + "content": "hand, fine-tuning the retriever based on LLM feedback to ensure it can retrieve the documents that meet the personalized generation needs of LLM is crucial." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.571, + 0.251, + 0.587 + ], + "angle": 0, + "content": "5.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.589, + 0.483, + 0.659 + ], + "angle": 0, + "content": "We conducted an ablation study to investigate the effectiveness of different modules in CFRAG, as shown in Table 3. CFRAG consists of three modules: User Retrieval, Document Retrieval, and Document Rerank. We removed different modules from CFRAG one by one to verify the effectiveness of each module." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.674, + 0.483, + 0.757 + ], + "angle": 0, + "content": "5.3.1 User Retrieval. First, we validated the effectiveness of introducing collaborative information by retrieving similar users, as shown in row (1) of Table 3. It can be seen that without retrieving similar users and only retrieving from the current user's history, the performance is worse than that of CFRAG, highlighting the importance of collaborative information." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.758, + 0.483, + 0.897 + ], + "angle": 0, + "content": "We also validated the effectiveness of training user embeddings using contrastive learning. For comparison, we directly averaged the document embeddings from the user's history to create user embeddings for retrieval, as shown in row (2) of Table 3. It can be seen that CFRAG, which uses user embeddings trained with contrastive learning, achieves better results. This is because contrastive learning constructs user similarity labels through data augmentation and uses the InfoNCE loss to help the embeddings learn which users are similar. In contrast, using mean pooling directly cannot capture user similarity." + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.512, + 0.71, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.588, + 0.605, + 0.645, + 0.617 + ], + "angle": 0, + "content": "(a) LaMP-1" + }, + { + "type": "image", + "bbox": [ + 0.719, + 0.512, + 0.907, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.785, + 0.605, + 0.843, + 0.617 + ], + "angle": 0, + "content": "(b) LaMP-5" + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.631, + 0.916, + 0.702 + ], + "angle": 0, + "content": "Figure 5: Results of using different methods to select users for introducing collaborative information. \"random\" indicates randomly selecting \\( m \\) users; \"top-\\((m - 2m)\\)\" represents selecting users whose similarity to the current user ranks between \\( m \\) and \\( 2m \\); \"top-\\( m \\)\" indicates selecting the most similar \\( m \\) users." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.737, + 0.916, + 0.897 + ], + "angle": 0, + "content": "5.3.2 Document Retrieval. We also validated the effectiveness of the personalized retriever we designed, as shown in Table 3, rows (3) and (4). First, in row (3), we can see that without fine-tuning based on LLM feedback, using a pre-trained dense retrieval model leads to worse performance. This indicates that retrieval cannot be based solely on semantic relevance, ensuring that the retrieved documents support personalized LLM generation is crucial. 
Additionally, we analyzed the impact of removing \\( S_{u,d}^{\\mathrm{retriever}} \\) from Eq. (4) and only using \\( S_{q,d}^{\\mathrm{retriever}} \\) from Eq. (3) for retrieval, as indicated in row (4). The results decreased, demonstrating that users' personalized preferences should also be considered during retrieval, rather" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.28, + 0.087 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + }, + { + "type": "header", + "bbox": [ + 0.843, + 0.076, + 0.913, + 0.087 + ], + "angle": 0, + "content": "Teng Shi et al." + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.109, + 0.277, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.157, + 0.203, + 0.213, + 0.214 + ], + "angle": 0, + "content": "(a) LaMP-1" + }, + { + "type": "image", + "bbox": [ + 0.287, + 0.109, + 0.476, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.354, + 0.203, + 0.411, + 0.214 + ], + "angle": 0, + "content": "(b) LaMP-5" + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.234, + 0.485, + 0.29 + ], + "angle": 0, + "content": "Figure 6: Results using different retrievers and rerankers. \"BM25\" indicates using BM25 as both the retriever and reranker, while \"w/o Tuning\" refers to using pre-trained retrievers and rerankers without LLM feedback fine-tuning." + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.298, + 0.278, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.157, + 0.392, + 0.213, + 0.403 + ], + "angle": 0, + "content": "(a) LaMP-1" + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.298, + 0.476, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.354, + 0.392, + 0.411, + 0.403 + ], + "angle": 0, + "content": "(b) LaMP-5" + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.423, + 0.483, + 0.464 + ], + "angle": 0, + "content": "Figure 7: Performance under different numbers of retrieved documents from the current user \\( u \\)'s history in the top-\\( k \\) documents." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.475, + 0.483, + 0.503 + ], + "angle": 0, + "content": "than solely focusing on the semantic relevance between the query and documents." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.51, + 0.483, + 0.638 + ], + "angle": 0, + "content": "5.3.3 Document Rerank. We also validated the effectiveness of the personalized reranker we designed, as shown in Table 3, rows (5) and (6). First, in row (5), it can be seen that using a pre-trained reranker leads to worse results, highlighting the importance of fine-tuning based on LLM feedback. We also observed the effect of removing \\(\\mathbf{e}_u\\) from Eq. (10) and only using \\(\\mathbf{h}_{q,d}\\) to calculate \\(S_{q,d}^{\\text{reranker}}\\) for ranking, as indicated in row (6). The results decreased in this case, highlighting the importance of considering users' personalized preferences in the reranker." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.65, + 0.315, + 0.666 + ], + "angle": 0, + "content": "5.4 Experimental Analysis" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.668, + 0.483, + 0.793 + ], + "angle": 0, + "content": "As mentioned in Section 1, adapting collaborative filtering into personalized RAG faces two challenges. Challenge 1: How to introduce collaborative information? 
Challenge 2: How to retrieve documents that support personalized LLM generation? In this section, we conduct experimental analysis to further demonstrate the effectiveness of our method in addressing these two challenges. Additionally, we provide further analysis of the results of CFRAG and the impact of hyper-parameters. Due to space limitations, we conducted experimental analysis on the LaMP-1 and LaMP-5 datasets." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.799, + 0.483, + 0.897 + ], + "angle": 0, + "content": "5.4.1 Effectiveness of User Retrieval using Contrastive Learning (Challenge 1). As described in Section 1, to address Challenge 1, we train user embeddings using contrastive learning to retrieve the top-\\(m\\) most similar users for introducing collaborative information. To validate the effectiveness of this approach, we compared it with randomly selecting \\(m\\) users and selecting users from top-\\(m\\) to \\(2m\\), as shown in Figure 5. First, we can see that randomly selecting" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.11, + 0.71, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.589, + 0.202, + 0.645, + 0.214 + ], + "angle": 0, + "content": "(a) LaMP-1" + }, + { + "type": "image", + "bbox": [ + 0.719, + 0.109, + 0.907, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.786, + 0.202, + 0.843, + 0.214 + ], + "angle": 0, + "content": "(b) LaMP-5" + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.234, + 0.915, + 0.275 + ], + "angle": 0, + "content": "Figure 8: Performance under different numbers of retrieved users. The performance is the worst since no collaborative information is introduced when \\( m = 1 \\)." + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.291, + 0.709, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.589, + 0.385, + 0.643, + 0.395 + ], + "angle": 0, + "content": "(a) LaMP-1" + }, + { + "type": "image", + "bbox": [ + 0.719, + 0.291, + 0.907, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.786, + 0.385, + 0.842, + 0.396 + ], + "angle": 0, + "content": "(b) LaMP-5" + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.415, + 0.915, + 0.444 + ], + "angle": 0, + "content": "Figure 9: Performance under different numbers of retrieved documents per user." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.463, + 0.915, + 0.56 + ], + "angle": 0, + "content": "users yields the worst performance, indicating that collaborative information cannot be introduced indiscriminately. Secondly, the results show that retrieving users from the range of top-\\(m\\) to \\(2m\\) performs worse than using the top-\\(m\\) users, suggesting that information from users who are more similar to the current user \\(u\\) is more important. These highlight the importance of retrieving the most similar top-\\(m\\) users" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.569, + 0.916, + 0.72 + ], + "angle": 0, + "content": "5.4.2 Effectiveness of Document Retrieval using LLM Feedback (Challenge 2). As mentioned in Section 1, to address Challenge 2, we fine-tune the retriever and reranker using feedback from the content generated by the LLM, enabling them to retrieve documents that better meet personalized LLM generation needs. 
To validate its effectiveness, we compared the results with those using retrievers and rerankers without LLM feedback fine-tuning, as well as using BM25 as the retriever and reranker, as shown in Figure 6. It can be observed that CFRAG performs the best, highlighting the importance of fine-tuning with LLM feedback rather than relying solely on semantic relevance." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.73, + 0.916, + 0.897 + ], + "angle": 0, + "content": "5.4.3 Impact of the Number of Documents from the Current User. To further validate that CFRAG enhances personalization by incorporating collaborative information, we observed the impact of the number of documents from the current user in the final top-\\(k\\) documents on the results, as shown in Figure 7. We varied the number of documents retrieved from the current user's history in the top-\\(k\\) documents from 0 to 5, with the remaining documents retrieved from similar users' histories. The results indicate that retrieving only from the current user's history leads to poor performance, while appropriately retrieving documents from similar users' histories significantly improves the results. This verifies the importance of incorporating collaborative information." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.533, + 0.088 + ], + "angle": 0, + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + }, + { + "type": "header", + "bbox": [ + 0.719, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.104, + 0.916, + 0.175 + ], + "angle": 0, + "content": "Table 4: The format of input, output, and user history for different datasets in the LaMP [32] benchmark. In the input, \\(\\{history_{i}\\}\\) will be replaced by the retrieved \\(i\\)-th history, and each history is represented as shown in the \"User History\" column. The other italicized text in the input is replaced with the user's input. For text generation tasks, to ensure that the LLM does not generate irrelevant information, we instruct the LLM in the input to generate in JSON format, and then we extract the LLM's prediction from the JSON-formatted output." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.179, + 0.907, + 0.622 + ], + "angle": 0, + "content": "
Task | Input | Output | User History
LaMP-1 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please choose one of the following two references that is more relevant to the user's input title: [1] {reference1}; [2] {reference2}. Please just answer with “[1]” or “[2]” without explanation. “title”: {title}. | [1] | “title”: {title}\n“abstract”: {abstract}
LaMP-2 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please select the tag from [sci-fi, based on a book, comedy ... ] that is most relevant to the user's input description. Please just answer with the tag name without explanation. “description”: {description}; “tag”: | comedy | “description”: {description};\n“tag”: {tag}
LaMP-3 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, what is the score of the following review on a scale of 1 to 5? Just answer with 1, 2, 3, 4, or 5 without further explanation. “review”: {review}; “score”: | 5 | “review”: {review}\n“score”: {score}
LaMP-4 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please generate a title for the given user's input text. Please generate it in the following format: {"title": "generated title"} without explanation, and use only English. “text”: {text}; “title”: | {“title”: Finding Happiness \nAfter Divorce - It Can Happen} | “text”: {text}\n“title”: {title}
LaMP-5 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please generate a title for the given user's input abstract. Please generate it in the following format: {"title": "generated title"} without explanation, and use only English. “abstract”: {abstract}; “title”: | {“title”: Link-Reliability Based \nTwo-Hop Routing for \nWireless Sensor Networks.} | “abstract”: {abstract}\n“title”: {title}
LaMP-7 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the style pattern of the historical tweets provided, please paraphrase the user's input tweet without any explanation before or after it. Please generate it in the following format: {"tweet": "generated tweet"} without explanation, and use only English. “tweet”: {tweet}. | {“tweet”: lilxcutiesworld the \ndanny picture is GOOD!! \nI really like it.} | “tweet”: {tweet}
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.632, + 0.483, + 0.771 + ], + "angle": 0, + "content": "5.4.4 Impact of the Number of Retrieved Users. Since we enhance personalized text generation by introducing collaborative filtering, we further explored how much collaborative information to introduce, specifically the impact of the number of retrieved users on the results, as shown in Figure 8. In LaMP-1, retrieving too few or too many users leads to poorer performance, with the best results at 4 users. In LaMP-5, the performance improves as the number of users increases. This highlights the importance of introducing collaborative filtering, but it also indicates that excessive introduction can lead to decreased effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.786, + 0.483, + 0.897 + ], + "angle": 0, + "content": "5.4.5 Impact of the Number of Retrieved Documents. We also analyzed the impact of the number of retrieved documents, \\( k \\), on the results, as shown in Figure 9. It can be observed that as the number of retrieved documents increases, performance improves, indicating the importance of retrieving user history to reflect user preferences for enhancing LLM-generated results. Since more documents lead to longer prompts and slower LLM generation, we chose \\( k = 5 \\) for our experiments." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.631, + 0.64, + 0.645 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.649, + 0.916, + 0.801 + ], + "angle": 0, + "content": "In this paper, we propose CFRAG, which adapts collaborative filtering into RAG to personalize LLMs. To introduce collaborative information without explicit user labels and retrieve documents that support personalized LLM generation, we first train user embeddings through contrastive learning to retrieve similar users. Then, we design the personalized retriever and reranker that considers user preferences during retrieval and fine-tune them using LLM feedback. The results on the Language Model Personalization (LaMP) benchmark validate the effectiveness of CFRAG. The experimental analysis also confirms the effectiveness of each module within CFRAG." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.85, + 0.71, + 0.866 + ], + "angle": 0, + "content": "A Appendix: Prompts" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.868, + 0.916, + 0.896 + ], + "angle": 0, + "content": "We provide detailed formats for the inputs, outputs, and user histories for the LLM across different datasets, as shown in Table 4." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.28, + 0.088 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + }, + { + "type": "header", + "bbox": [ + 0.843, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Teng Shi et al." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.106, + 0.178, + 0.12 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.123, + 0.482, + 0.143 + ], + "angle": 0, + "content": "[1] AI@Meta. 2024. Llama 3 Model Card. (2024). https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.144, + 0.483, + 0.174 + ], + "angle": 0, + "content": "[2] Akari Asai, Zegiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. [n.d.]. Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection. 
In The Twelfth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.174, + 0.482, + 0.204 + ], + "angle": 0, + "content": "[3] Sebastian Borgeaud, Arthur Mensch, et al. 2022. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, PMLR, 2206-2240." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.205, + 0.482, + 0.233 + ], + "angle": 0, + "content": "[4] Jin Chen, Zheng Liu, et al. 2024. When large language models meet personalization: Perspectives of challenges and opportunities. World Wide Web 27, 4 (2024), 42." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.234, + 0.483, + 0.264 + ], + "angle": 0, + "content": "[5] Sunhao Dai, Ninglu Shao, et al. 2023. Uncovering chatgpt's capabilities in recommender systems. In Proceedings of the 17th ACM Conference on Recommender Systems. 1126-1132." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.265, + 0.482, + 0.315 + ], + "angle": 0, + "content": "[6] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.316, + 0.482, + 0.355 + ], + "angle": 0, + "content": "[7] Wenqi Fan, Yujuan Ding, Liangbo Ning, Shijie Wang, Hengyun Li, Dawei Yin, Tat-Seng Chua, and Qing Li. 2024. A Survey on RAG Meeting LLMs: Towards Retrieval-Augmented Large Language Models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 6491-6501." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.356, + 0.482, + 0.385 + ], + "angle": 0, + "content": "[8] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.386, + 0.482, + 0.415 + ], + "angle": 0, + "content": "[9] Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning. PMLR, 3929-3938." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.416, + 0.482, + 0.455 + ], + "angle": 0, + "content": "[10] Xiangnan He, Kuan Deng, Xiang Wang, Yan Li, Yongdong Zhang, and Meng Wang. 2020. Lightgcn: Simplifying and powering graph convolution network for recommendation. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval. 639-648." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.456, + 0.482, + 0.485 + ], + "angle": 0, + "content": "[11] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173-182." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.486, + 0.482, + 0.516 + ], + "angle": 0, + "content": "[12] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. [n.d.]. LoRA: Low-Rank Adaptation of Large Language Models. In International Conference on Learning Representations." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.517, + 0.482, + 0.556 + ], + "angle": 0, + "content": "[13] Gautier Izacard and Edouard Grave. 2021. Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume. 874-880." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.557, + 0.482, + 0.577 + ], + "angle": 0, + "content": "[14] Gautier Izacard, Patrick Lewis, et al. 2022. Few-shot learning with retrieval augmented language models. arXiv preprint arXiv:2208.03299 1, 2 (2022), 4." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.578, + 0.482, + 0.607 + ], + "angle": 0, + "content": "[15] Ashish Jaiswal, Ashwin Ramesh Babu, Mohammad Zaki Zadeh, Debapriya Banerjee, and Fillia Makedon. 2020. A survey on contrastive self-supervised learning. Technologies 9, 1 (2020), 2." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.608, + 0.482, + 0.637 + ], + "angle": 0, + "content": "[16] Joel Jang, Seungone Kim, et al. 2023. Personalized soups: Personalized large language model alignment via post-hoc parameter merging. arXiv preprint arXiv:2310.11564 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.638, + 0.482, + 0.667 + ], + "angle": 0, + "content": "[17] Nikhil Kandpal, Haikang Deng, Adam Roberts, Eric Wallace, and Colin Raffel. 2023. Large language models struggle to learn long-tail knowledge. In International Conference on Machine Learning. PMLR, 15696-15707." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.668, + 0.482, + 0.687 + ], + "angle": 0, + "content": "[18] Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.688, + 0.482, + 0.707 + ], + "angle": 0, + "content": "[19] Yehuda Koren, Robert Bell, and Chris Volinsky. 2009. Matrix factorization techniques for recommender systems. Computer 42, 8 (2009), 30-37." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.708, + 0.482, + 0.737 + ], + "angle": 0, + "content": "[20] Patrick Lewis, Ethan Perez, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems 33 (2020), 9459-9474." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.738, + 0.482, + 0.768 + ], + "angle": 0, + "content": "[21] Cheng Li, Mingyang Zhang, Qiaozhu Mei, Yaqing Wang, Spurthi Amba Hombaiah, Yi Liang, and Michael Bendersky. 2023. Teach LLMs to Personalize-An Approach inspired by Writing Education. arXiv preprint arXiv:2308.07968 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.769, + 0.482, + 0.798 + ], + "angle": 0, + "content": "[22] Junyi Li, Tianyi Tang, Wayne Xin Zhao, Jian-Yun Nie, and Ji-Rong Wen. 2024. Pre-trained language models for text generation: A survey. Comput. Surveys 56, 9 (2024), 1-39." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.799, + 0.482, + 0.818 + ], + "angle": 0, + "content": "[23] Xinyu Li, Zachary C Lipton, and Liu Leqi. 2024. Personalized language modeling from personalized human feedback. arXiv preprint arXiv:2402.05133 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.819, + 0.482, + 0.838 + ], + "angle": 0, + "content": "[24] Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out. 74-81." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.839, + 0.482, + 0.878 + ], + "angle": 0, + "content": "[25] Xi Victoria Lin, Xilun Chen, Mingda Chen, Weijia Shi, Maria Lomeli, Richard James, Pedro Rodriguez, Jacob Kahn, Gergely Szilvasy, Mike Lewis, et al. [n.d.]. RA-DIT: Retrieval-Augmented Dual Instruction Tuning. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.879, + 0.482, + 0.897 + ], + "angle": 0, + "content": "[26] Yinhan Liu. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)." + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.123, + 0.483, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.109, + 0.913, + 0.139 + ], + "angle": 0, + "content": "[27] Sheshera Mysore, Zhuoran Lu, et al. 2023. Pearl: Personalizing large language model writing assistants with generation-calibrated retrievers. arXiv preprint arXiv:2311.09180 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.141, + 0.913, + 0.16 + ], + "angle": 0, + "content": "[28] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. 2018. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.161, + 0.913, + 0.2 + ], + "angle": 0, + "content": "[29] Chris Richardson, Yao Zhang, Kellen Gillespie, Sudipta Kar, Arshdeep Singh, Zeynab Raeesy, Omar Zia Khan, and Abhinav Sethy. 2023. Integrating summarization and retrieval for enhanced personalization via large language models. arXiv preprint arXiv:2310.20081 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.201, + 0.913, + 0.23 + ], + "angle": 0, + "content": "[30] Stephen E Robertson, Steve Walker, Susan Jones, Micheline M Hancock-Beaulieu, Mike Gatford, et al. 1995. Okapi at TREC-3. Nist Special Publication Sp 109 (1995), 109." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.231, + 0.913, + 0.27 + ], + "angle": 0, + "content": "[31] Alireza Salemi, Surya Kallumadi, and Hamed Zamani. 2024. Optimization methods for personalizing large language models through retrieval augmentation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 752-762." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.271, + 0.913, + 0.3 + ], + "angle": 0, + "content": "[32] Alireza Salemi, Sheshera Mysore, Michael Bendersky, and Hamed Zamani. 2023. Lamp: When large language models meet personalization. arXiv preprint arXiv:2304.11406 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.301, + 0.913, + 0.33 + ], + "angle": 0, + "content": "[33] Chenglei Shen, Xiao Zhang, Teng Shi, Changshuo Zhang, Guofu Xie, and Jun Xu. 2024. A survey of controllable learning: Methods and applications in information retrieval. arXiv preprint arXiv:2407.06083 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.332, + 0.913, + 0.381 + ], + "angle": 0, + "content": "[34] Teng Shi, Zihua Si, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Dewei Leng, Yanan Niu, and Yang Song. 2024. UniSAR: Modeling User Transition Behaviors between Search and Recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1029-1039." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.382, + 0.913, + 0.432 + ], + "angle": 0, + "content": "[35] Weijia Shi, Sewon Min, Michihiro Yasunaga, Minjoon Seo, Richard James, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. 2024. REPLUG: Retrieval-Augmented Black-Box Language Models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 8364-8377." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.433, + 0.913, + 0.462 + ], + "angle": 0, + "content": "[36] Zhaoxuan Tan, Zheyuan Liu, and Meng Jiang. 2024. Personalized Pieces: Efficient Personalized Large Language Models through Collaborative Efforts. arXiv preprint arXiv:2406.10471 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.463, + 0.913, + 0.492 + ], + "angle": 0, + "content": "[37] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2024. Democratizing Large Language Models via Personalized Parameter-Efficient Fine-tuning. arXiv:2402.04401 [cs.CL] https://arxiv.org/abs/2402.04401" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.493, + 0.913, + 0.532 + ], + "angle": 0, + "content": "[38] Jiakai Tang, Sunhao Dai, Teng Shi, Jun Xu, Xu Chen, Wen Chen, Wu Jian, and Yuning Jiang. 2025. Think Before Recommend: Unleashing the Latent Reasoning Power for Sequential Recommendation. arXiv:2503.22675 [cs.IR] https://arxiv.org/abs/2503.22675" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.533, + 0.913, + 0.552 + ], + "angle": 0, + "content": "[39] A Vaswani. 2017. Attention is all you need. Advances in Neural Information Processing Systems (2017)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.553, + 0.913, + 0.583 + ], + "angle": 0, + "content": "[40] Xiang Wang, Xiangnan He, Meng Wang, Fuli Feng, and Tat-Seng Chua. 2019. Neural graph collaborative filtering. In Proceedings of the 42nd international ACM SIGIR conference on Research and development in Information Retrieval. 165-174." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.584, + 0.913, + 0.613 + ], + "angle": 0, + "content": "[41] Likang Wu, Zhi Zheng, Zhaopeng Qiu, Hao Wang, Hongchao Gu, Tingjia Shen, Chuan Qin, Chen Zhu, Hengshu Zhu, Qi Liu, et al. 2024. A survey on large language models for recommendation. World Wide Web 27, 5 (2024), 60." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.614, + 0.913, + 0.643 + ], + "angle": 0, + "content": "[42] Xinghao Wu, Xuefeng Liu, Jianwei Niu, Haolin Wang, Shaojie Tang, and Guogang Zhu. 2024. FedLoRA: When Personalized Federated Learning Meets Low-Rank Adaptation. (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.644, + 0.913, + 0.683 + ], + "angle": 0, + "content": "[43] Zeqiu Wu, Yushi Hu, Weijia Shi, Nouha Dziri, Alane Suhr, Prithviraj Ammanabrolu, Noah A Smith, Mari Ostendorf, and Hannaneh Hajishirzi. 2024. Fine-grained human feedback gives better rewards for language model training. Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.684, + 0.913, + 0.713 + ], + "angle": 0, + "content": "[44] Zhuofeng Wu, Sinong Wang, Jiatao Gu, Madian Khabsa, Fei Sun, and Hao Ma. 2020. Clear: Contrastive learning for sentence representation. arXiv preprint arXiv:2012.15466 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.714, + 0.913, + 0.744 + ], + "angle": 0, + "content": "[45] Shitao Xiao, Zheng Liu, Peitian Zhang, and Niklas Muennighoff. 2023. 
C-Pack: Packaged Resources To Advance General Chinese Embedding. arXiv:2309.07597 [cs.CL]" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.745, + 0.913, + 0.773 + ], + "angle": 0, + "content": "[46] Hong-Jian Xue, Xinyu Dai, Jianbing Zhang, Shujian Huang, and Jiajun Chen. 2017. Deep matrix factorization models for recommender systems.. In IJCAI, Vol. 17. Melbourne, Australia, 3203-3209." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.775, + 0.913, + 0.804 + ], + "angle": 0, + "content": "[47] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Cheng-peng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.805, + 0.913, + 0.835 + ], + "angle": 0, + "content": "[48] Changshuo Zhang, Teng Shi, Xiao Zhang, Qi Liu, Ruobing Xie, Jun Xu, and Ji-Rong Wen. 2024. Modeling Domain and Feedback Transitions for Cross-Domain Sequential Recommendation. arXiv preprint arXiv:2408.08209 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.836, + 0.913, + 0.865 + ], + "angle": 0, + "content": "[49] Changshuo Zhang, Teng Shi, Xiao Zhang, Yanping Zheng, Ruobing Xie, Qi Liu, Jun Xu, and Ji-Rong Wen. 2024. QAGCF: Graph Collaborative Filtering for Q&A Recommendation. arXiv preprint arXiv:2406.04828 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.866, + 0.913, + 0.895 + ], + "angle": 0, + "content": "[50] Changshuo Zhang, Xiao Zhang, Teng Shi, Jun Xu, and Ji-Rong Wen. 2025. Test-Time Alignment for Tracking User Interest Shifts in Sequential Recommendation. arXiv:2504.01489 [cs.IR] https://arxiv.org/abs/2504.01489" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.109, + 0.913, + 0.895 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.533, + 0.088 + ], + "angle": 0, + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + }, + { + "type": "header", + "bbox": [ + 0.719, + 0.076, + 0.912, + 0.088 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.109, + 0.484, + 0.151 + ], + "angle": 0, + "content": "[51] Kepu Zhang, Teng Shi, Sunhao Dai, Xiao Zhang, Yinfeng Li, Jing Lu, Xiaoxue Zang, Yang Song, and Jun Xu. 2024. SAQRec: Aligning Recommender Systems to User Satisfaction via Questionnaire Feedback. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3165-3175." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.151, + 0.484, + 0.182 + ], + "angle": 0, + "content": "[52] Xiao Zhang, Teng Shi, Jun Xu, Zhenhua Dong, and Ji-Rong Wen. 2024. Model-Agnostic Causal Embedding Learning for Counterfactually Group-Fair Recommendation. IEEE Transactions on Knowledge and Data Engineering (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.182, + 0.482, + 0.222 + ], + "angle": 0, + "content": "[53] Yue Zhang, Yafu Li, Leyang Cui, Deng Cai, Lemao Liu, Tingchen Fu, Xinting Huang, Enbo Zhao, Yu Zhang, Yulong Chen, et al. 2023. Siren's song in the AI ocean: a survey on hallucination in large language models. arXiv preprint arXiv:2309.01219 (2023)." 
+ }, + { + "type": "list", + "bbox": [ + 0.085, + 0.109, + 0.484, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.109, + 0.913, + 0.14 + ], + "angle": 0, + "content": "[54] Wayne Xin Zhao, Jing Liu, Ruiyang Ren, and Ji-Rong Wen. 2024. Dense text retrieval based on pretrained language models: A survey. ACM Transactions on Information Systems 42, 4 (2024), 1-60." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.141, + 0.914, + 0.171 + ], + "angle": 0, + "content": "[55] Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.171, + 0.914, + 0.201 + ], + "angle": 0, + "content": "[56] Yutao Zhu, Huaying Yuan, Shuting Wang, Jiongnan Liu, Wenhan Liu, Chenlong Deng, Haonan Chen, Zhicheng Dou, and Ji-Rong Wen. 2023. Large language models for information retrieval: A survey. arXiv preprint arXiv:2308.07107 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.201, + 0.915, + 0.231 + ], + "angle": 0, + "content": "[57] Yuchen Zhuang, Haotian Sun, Yue Yu, Qifan Wang, Chao Zhang, and Bo Dai. 2024. HYDRA: Model Factorization Framework for Black-Box LLM Personalization. arXiv preprint arXiv:2406.02888 (2024)." + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.109, + 0.915, + 0.231 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_origin.pdf b/data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4bb5ba1fef8056c4c4d9711edfda47b980dc457c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24cf8e274e0db1c92f380e61d9d9f76f575771d23873fefb5f116bf0c00ceb3c +size 1203734 diff --git a/data/2025/2504_05xxx/2504.05731/full.md b/data/2025/2504_05xxx/2504.05731/full.md new file mode 100644 index 0000000000000000000000000000000000000000..9a01de5351d2b3d0c4960fbe0259c47f59fe8b13 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/full.md @@ -0,0 +1,477 @@ +# Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation + +Teng Shi + +Renmin University of China + +Beijing, China + +shiteng@ruc.edu.cn + +Xiaoxue Zang + +Kai Zheng + +Kuaishou Technology Co., Ltd. + +Beijing, China + +xxic666@126.com + +zhengk92@gmail.com + +# Abstract + +Recently, the personalization of Large Language Models (LLMs) to generate content that aligns with individual user preferences has garnered widespread attention. Personalized Retrieval-Augmented Generation (RAG), which retrieves relevant documents from the user's history to reflect their preferences and enhance LLM generation, is one commonly used approach for personalization. However, existing personalized RAG methods do not consider that the histories of similar users can also assist in personalized generation for the current user, meaning that collaborative information between users can also benefit personalized generation. Inspired by the application of collaborative filtering in recommender systems, we propose a method called CFRAG, which adapts Collaborative Filtering to RAG for personalized text generation. 
However, this presents two challenges: (1) how to incorporate collaborative information without explicit user similarity labels? (2) how to retrieve documents that support personalized LLM generation? For Challenge 1, we use contrastive learning to train user embeddings to retrieve similar users and introduce collaborative information. For Challenge 2, we design a personalized retriever and reranker to retrieve the top- $k$ documents from these users' histories. We take into account the user's preference during retrieval and reranking. Then we leverage feedback from the LLM to fine-tune the personalized retriever and reranker, enabling them to retrieve documents that meet the personalized generation needs of the LLM. Experimental results on the Language Model Personalization (LaMP) benchmark + +*Corresponding authors. Work partially done at Engineering Research Center of Next-Generation Intelligent Search and Recommendation, Ministry of Education. +Work done when Teng Shi was the intern at Kuaishou. + +Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org. + +SIGIR '25, Padua, Italy. + +© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM. ACM ISBN 979-8-4007-1592-1/25/07 + +https://doi.org/10.1145/XXXXXX.XXXXXXX + +Jun Xu* + +Xiao Zhang + +Renmin University of China + +Beijing, China + +{junxu,zhangx89}@ruc.edu.cn + +Yang Song + +Han Li + +Kuaishou Technology Co., Ltd. + +Beijing, China + +ys@sonyis.me + +lihan08@kuaishou.com + +validate the effectiveness of CFRAG. Further analysis confirms the importance of incorporating collaborative information. + +# CCS Concepts + +- Information systems $\rightarrow$ Personalization; - Computing methodologies $\rightarrow$ Natural language generation. + +# Keywords + +Large language model; Personalization; Retrieval augmented generation + +# ACM Reference Format: + +Teng Shi, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Yang Song, and Han Li. 2025. Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation. In Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '25), July 13-18, 2025, Padua, Italy. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/XXXXXX.XXXXXXX + +# 1 Introduction + +Personalizing Large Language Models (LLMs) [55] to generate personalized outputs tailored to individual user preferences has emerged as a significant and rapidly growing field [16, 23, 29, 31, 32, 36, 37, 57]. Personalized Retrieval-Augmented Generation (RAG) [8] has become a commonly used approach for personalizing LLMs [29, 31, 32, 57]. + +The process of existing personalized RAG methods typically involves retrieving similar documents from the user's historical behaviors based on the user's input query, then concatenating these documents with the query as a prompt input to the LLM for generation. 
Although effective, this approach is limited to retrieving only the current user's history, neglecting collaborative information. Users with similar histories tend to be more alike, and the information from these similar users can also aid in personalizing generation for the current user. As shown in the example in Figure 1, the upper part illustrates the results of the existing RAG method, which retrieves documents from the current user's history. We can only infer from these results that "She" in the user's input refers to "Hillary Clinton". In contrast, the lower part demonstrates + +![](images/c145d4b73d48d432adc24495509ff5b7dc143be4525fd4b84a31137fa6d78e46.jpg) +Figure 1: An example from the LaMP-4 dataset [32]. The task of LaMP-4 is to generate personalized news headlines based on user input. This example illustrates the benefit of collaborative information for LLM personalization: (a) The top shows results retrieved by the existing RAG method from the current user's history, where we can only infer that "She" in the user's input refers to "Hillary Clinton". (b) The bottom shows results retrieved by our method from similar users' histories, allowing us to infer further that "his" in the user's input refers to "Donald Trump" thus enabling the generation of a more accurate result. + +our method, which retrieves documents from the history of similar users. In this case, we can further infer that "his" in the user's input refers to "Donald Trump", leading to a better generation result. From this example, we can see that incorporating collaborative information allows the retrieval of more diverse documents, helping the LLM generate results that better meet the user's needs. + +Inspired by the application of collaborative filtering in recommender systems [11, 40, 46], we propose to adapt collaborative information into RAG to personalize LLMs. However, adapting collaborative filtering to personalized RAG presents two challenges. Challenge 1: How to incorporate collaborative information. Without explicit labels indicating which users are similar, which users' information should be selected to help personalize generation for the current user? Challenge 2: How to retrieve documents that support personalized LLM generation, rather than relying on traditional semantic relevance? Pre-trained dense retrieval models [54] only retrieve based on the semantic relevance between the query and document. Directly using these models for retrieval may not necessarily result in content that allows the LLM to generate outputs that meet the user's needs [25, 35]. + +To address the above challenges, this paper proposes a method named CFRAG which adapts Collaborative Filtering to personalized Retrieval Augmented Generation. Firstly, to address Challenge 1, since there are no explicit user similarity labels, we use contrastive learning [15, 44] to train user embeddings for retrieving similar users to introduce collaborative information. Specifically, we apply different data augmentation methods to the user's history to obtain different views, and then treat different views of the same user's history as positive samples for each other. Then we use contrastive learning on different views to train the user embeddings. Secondly, for Challenge 2, we designed a personalized retriever and reranker to retrieve the top- $k$ documents from the histories of the retrieved users. 
In both retrieval and reranking, in addition to the semantic + +relevance between the query and documents, we also considered the user's preferences for different documents to enable personalized retrieval. Additionally, we further fine-tune the retriever and reranker based on the feedback from the LLM to ensure that the retrieved documents better support the personalized LLM generation. Finally, the top- $k$ documents are concatenated with the user's input query to form a prompt, which is then fed into the LLM for personalized generation. + +The major contributions of the paper are summarized as follows: We analyzed the necessity of introducing collaborative filtering into RAG for LLM personalization and identified the challenges: how to introduce collaborative information and how to retrieve documents that support personalized LLM generation. + +- We proposed a method called CFRAG, which uses contrastive learning to train user embeddings for retrieving similar users and incorporating collaborative information. It leverages LLM feedback to train the personalized retriever and reranker, enabling them to retrieve documents that support personalized LLM generation. +- Experimental results on the Language Model Personalization (LaMP) [32] benchmark validate the effectiveness of CFRAG. The experimental analysis also demonstrates the importance of leveraging collaborative information. + +# 2 Related Work + +Personalization of LLMs. Large Language Models (LLMs) [55] have demonstrated remarkable capabilities in various fields, such as text generation [22], information retrieval [56], recommender systems [5, 41], and so on. However, since LLMs are typically designed to serve all tasks with a single model and are trained on broad, domain-agnostic data, they face challenges in adapting to the personalized needs of individual users [4, 32]. Therefore, LLM personalization has attracted widespread attention [16, 31, 57]. + +![](images/34f15c2d425a0ba3e0b25f67abb59aecc0f185aae42a557093eae9d488fd5f26.jpg) +Figure 2: The architecture of CFRAG. From left to right: (a) User Retrieval retrieves similar users (Section 4.1); (b) Retriever retrieves the top- $k$ documents from each user's history (Section 4.2); (c) Reranker reranks the $m \times k$ documents to get the final top- $k$ documents, which are then concatenated with the query and input into the LLM for personalized text generation (Section 4.3). + +Existing works on LLM personalization mainly include the following types of methods: (1) Fine-tuning a personalized LLM for each user [36, 37, 42]; Tan et al. [37] fine-tuned the LLM using LoRA [12] to get personalized LoRA parameters for each user. (2) Aligning LLMs with user-specific preferences through Reinforcement Learning from Human Feedback (RLHF) [16, 23, 43]; Jang et al. [16] first trained different parameters for various objectives using RLHF, then merged these parameters based on users' personalized needs. (3) Incorporating user-specific context into the prompt [21, 27, 29, 31, 32, 57]. Richardson et al. [29] used instruction-tuned LLMs to summarize user history and then incorporated it into prompts for generation. Salemi et al. [31, 32] used RAG to retrieve relevant documents from user history based on the input query and incorporated them into the prompt. + +This paper further introduces collaborative filtering for personalization based on the RAG framework. Collaborative filtering has already been applied in fields such as recommender systems [33, 34, 38, 48-52] and has been proven effective. 
It assumes that users who have interacted with similar items share similar preferences, and recommending items from similar users to the current user can meet their needs. Some works [11, 46] learn the collaborative information between users and items through matrix factorization [19], while others [10, 40] further explore higher-order collaborative information between users and items using graph neural networks. The application of collaborative filtering in LLM personalization remains under-explored. + +Retrieval Augmented Generation. Retrieval Augmented Generation [7, 8] introduces external knowledge through document retrieval, alleviating issues such as LLM hallucinations [53], and enhancing LLMs' capabilities in knowledge-intensive tasks [17] such as open-domain question answering [14, 20]. Some works [3, 13] encode retrieved documents using separate encoders, and then fuse the results with the language model using cross-attention. A more common approach is to directly include the retrieved documents in the prompt of the LLM [2, 9, 20, 25, 35]. In recent years, this + +in-context RAG framework has also been applied to LLM personalization, which is personalized by retrieving documents from the user's history [31, 32, 57]. This paper introduces collaborative filtering by retrieving similar users' histories for better personalization. + +# 3 Problem Formulation + +Let $\mathcal{U} = \{u_1, u_2, \ldots, u_M\}$ denotes the set of all users, where $M$ is the number of users. Each user $u \in \mathcal{U}$ has a chronologically ordered history $\mathcal{H}_u = [d_1, d_2, \ldots, d_N]$ which includes all her historical documents, where $N$ is the number of documents in the history. The personalized text generation dataset is $\mathcal{D} = \{(u, q, y)_i\}_{i=1}^{|D|}$ . For each instance, $q$ is the query input by the user $u$ to the LLM, and $y$ is the target output. Our goal is first to introduce collaborative information by retrieving the top- $m$ most similar users for user $u$ : + +$$ +\mathcal {U} _ {\text {r e t r i e v e d}} = \left\{u _ {1}, u _ {2}, \dots , u _ {m} \right\}. +$$ + +Then, we use a retriever to retrieve the top- $k$ documents from each of the $m$ users' histories, resulting in a total of $m \times k$ documents. + +$$ +\mathcal {D} _ {\text {r e t r i e v e d}} = \{d _ {i, j} | i \in \{1, \dots , m \}, j \in \{1, \dots , k \} \}. +$$ + +Finally, we use a reranker to rerank these $m \times k$ documents and obtain the final top- $k$ documents: + +$$ +\mathcal {D} _ {\text {r e r a n k e d}} = \left\{d _ {i} | i \in \{1, \dots , k \} \right\}. +$$ + +These top- $k$ documents will be concatenated with the user's query $q$ as a prompt and input into the LLM, enabling it to generate a response that aligns with the target output $y$ . + +This paper primarily focuses on how to retrieve $\mathcal{U}_{\mathrm{retrieved}}$ to introduce collaborative information, and how to train the retriever and reranker so that they can effectively retrieve documents that support the personalized LLM generation. + +# 4 Our Approach + +This section introduces our method CFRAG. CFRAG's overall architecture is shown in Figure 2. As mentioned in Section 1, to address + +Challenge 1, i.e., how to introduce collaborative information, we first train user embeddings using contrastive learning to retrieve the top- $m$ most similar users (see Section 4.1). 
For Challenge 2, which involves retrieving documents that support personalized LLM generation, we fine-tune the personalized retriever and reranker using LLM feedback. The retriever first retrieves the top- $k$ documents from the history of each of the $m$ users, resulting in $m \times k$ documents (see Section 4.2). The reranker then reranks these documents to obtain the final top- $k$ documents as input for the LLM (see Section 4.3). + +# 4.1 User Retrieval + +First, we perform user retrieval to get the top- $m$ most similar users for user $u$ to introduce collaborative information. However, we do not have labels indicating which users are similar to each other. To address this, we employ a contrastive learning [15, 44] approach. We apply different data augmentation methods to the user history $\mathcal{H}_u$ to obtain different views of the user's history. We treat different views of the same user as positive samples and the histories of other users as negative samples, and then we use the InfoNCE [28] loss to train user embeddings for retrieval. Figure 3 illustrates the process of training user embeddings using contrastive learning. + +4.1.1 User Encoder. Specifically, we first use an embedding model (such as BERT [6], RoBERTa [26], BGE [45] etc.) $\mathbf{Emb}(\cdot)$ to encode each document in the user's history $\mathcal{H}_u$ to obtain $\mathbf{E}_u = [\mathbf{e}_1,\mathbf{e}_2,\dots ,\mathbf{e}_N]^{\intercal}\in \mathbb{R}^{N\times d}$ , where $\mathbf{e}_i = \mathbf{Emb}(d_i)$ and $d$ is the embedding dimension. To model the sequential relationships between different documents in the user's history, we introduce positional embedding $\mathbf{P}\in \mathbb{R}^{N\times d}$ . Afterward, the history $\mathcal{H}_u$ 's embedding becomes $\widehat{\mathbf{E}}_u = \mathbf{E}_u + \mathbf{P}$ . Then, we apply a transformer [39] as the user encoder to encode the user's history $\widehat{\mathbf{E}}_u$ and average the transformer's output to obtain the user's embedding: + +$$ +\mathbf {e} _ {u} = \operatorname {E n c o d e r} _ {u} (u) = \operatorname {M E A N} (\operatorname {T r m} (\widehat {\mathbf {F}} _ {u})) \in \mathbb {R} ^ {d}, \tag {1} +$$ + +where $\mathrm{Encoder}_u(\cdot)\to \mathbb{R}^d$ denotes the user encoder, $\mathrm{Trm}(\cdot)$ denotes a transformer encoder. Next, we train the transformer encoder using contrastive learning. + +4.1.2 Data Augmentation. We generate different views of $\mathcal{H}_u$ using the following three data augmentation methods: + +Document Crop. We randomly select a continuous sub-sequence of length $L_{c} = \lfloor \eta_{c}N\rfloor$ from $\mathcal{H}_u$ , where $\eta_c$ is a hyper-parameter controlling the crop ratio. The history after cropping is as follows: + +$$ +\mathcal {H} _ {u} ^ {\mathrm {c r o p}} = [ d _ {c}, d _ {c + 1}, \dots , d _ {c + L _ {c} - 1} ]. +$$ + +Document Mask. For the history $\mathcal{H}_u$ , we randomly mask out $L_{m} = \lfloor \eta_{m}N\rfloor$ documents $\mathcal{I}_{\mathrm{mask}} = \{i_1,i_2,\dots ,i_{L_m}\}$ , where $\mathcal{I}_{\mathrm{mask}}$ is the set of indices corresponding to the masked documents and $\eta_{m}$ is a hyper-parameter that controls the mask ratio. The masked documents are replaced with a special token [mask]. 
The history after masking is as follows: + +$$ +\begin{array}{l} \mathcal {H} _ {u} ^ {\text {m a s k}} = \left[ \hat {d} _ {1}, \hat {d} _ {2}, \dots , \hat {d} _ {N} \right], \\ \hat {d} _ {i} = \left\{ \begin{array}{l l} d _ {i}, & i \notin \mathcal {I} _ {\text {m a s k}}, \\ [ \text {m a s k} ], & i \in \mathcal {I} _ {\text {m a s k}}. \end{array} \right. \\ \end{array} +$$ + +![](images/625fdaef24bf247580e641a34943670ea15cb8f8da28784bc68a9e6a62dbe508.jpg) +Figure 3: Contrastive learning for user embedding training. + +Document Reorder. We randomly select a sub-sequence $[d_r, d_{r+1}, \ldots, d_{r+L_r-1}]$ of length $L_r = \lfloor \eta_r N \rfloor$ from $\mathcal{H}_u$ , where $\eta_r$ is a hyper-parameter controlling the reorder ratio, and then randomly shuffle the order of the documents within the sub-sequence to obtain $[\hat{d}_r, \hat{d}_{r+1}, \ldots, \hat{d}_{r+L_r-1}]$ . The history after reordering is as follows: + +$$ +\mathcal {H} _ {u} ^ {\text {r e o r d e r}} = \left[ d _ {1}, d _ {2}, \dots , \hat {d} _ {r}, \dots , \hat {d} _ {r + L _ {r} - 1}, \dots , d _ {N} \right]. +$$ + +4.1.3 Contrastive Loss. Each time, we randomly select two data augmentation methods $\mathcal{A}'$ and $\mathcal{A}''$ to generate two different views of $\mathcal{H}_u$ , denoted as $\mathcal{H}_u'$ and $\mathcal{H}_u''$ . Then, using the encoder described in Section 4.1.1, we obtain the user embeddings $\mathbf{e}_u'$ and $\mathbf{e}_u''$ corresponding to the different views. Since $\mathbf{e}_u'$ and $\mathbf{e}_u''$ are obtained through data augmentation of $\mathcal{H}_u$ , they are more similar to each other. Therefore, we treat them as positive samples for each other and use the views generated from the augmented histories of other users in the same batch as negative samples. We then perform contrastive learning using the InfoNCE [28] loss as follows: + +$$ +\begin{array}{l} \mathcal {L} _ {\mathrm {C L}} = - \left[ \log \frac {\exp \left(\cos \left(\mathbf {e} _ {u} ^ {\prime} , \mathbf {e} _ {u} ^ {\prime \prime}\right) / \tau_ {1}\right)}{\sum_ {u ^ {-} \in \mathcal {U} _ {\mathrm {n e g}}} \exp \left(\cos \left(\mathbf {e} _ {u} ^ {\prime} , \mathbf {e} _ {u ^ {-}} ^ {\prime \prime}\right) / \tau_ {1}\right)} \right. \tag {2} \\ \left. + \log \frac {\exp \left(\cos \left(\mathbf {e} _ {u} ^ {\prime} , \mathbf {e} _ {u} ^ {\prime \prime}\right) / \tau_ {1}\right)}{\sum_ {u ^ {-} \in \mathcal {U} _ {\text {n e g}}} \exp \left(\cos \left(\mathbf {e} _ {u ^ {-}} ^ {\prime}, \mathbf {e} _ {u} ^ {\prime \prime}\right) / \tau_ {1}\right)} \right], \\ \end{array} +$$ + +where $\tau_{1}$ is the temperature coefficient, $\mathcal{U}_{\mathrm{neg}}$ are the set of randomly sampled in-batch negative samples, and $\cos (\cdot)$ denotes the cosine similarity. + +4.1.4 Top- $m$ User Retrieval. After training with contrastive learning, we can use the encoder from Section 4.1.1 to obtain the user embedding $\mathbf{e}_u$ . We then calculate the cosine similarity between each pair of user embeddings and retrieve the top- $m$ most similar users $\mathcal{U}_{\mathrm{retrieved}} = \{u_1, u_2, \dots, u_m\}$ for user $u$ . Subsequently, the histories of these $m$ users will be used for further document retrieval. 
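Putting Section 4.1 together, the sketch below illustrates Eqs. (1)–(2) and the top- $m$ retrieval step in PyTorch. It is a minimal illustration under assumed shapes and hyper-parameters; `UserEncoder`, `info_nce`, and `top_m_users` are our names for the components, not the paper's released code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class UserEncoder(nn.Module):
    """Eq. (1): mean-pool a transformer over position-augmented document embeddings."""
    def __init__(self, d: int = 768, max_len: int = 64, n_layers: int = 2):
        super().__init__()
        self.pos = nn.Parameter(torch.zeros(max_len, d))        # positional embedding P
        layer = nn.TransformerEncoderLayer(d, nhead=8, batch_first=True)
        self.trm = nn.TransformerEncoder(layer, num_layers=n_layers)

    def forward(self, doc_embs: torch.Tensor) -> torch.Tensor:
        # doc_embs: (batch, N, d), the embeddings E_u of each history document
        h = doc_embs + self.pos[: doc_embs.size(1)]             # E_u + P
        return self.trm(h).mean(dim=1)                          # e_u = MEAN(Trm(.))

def info_nce(e1: torch.Tensor, e2: torch.Tensor, tau: float = 0.1) -> torch.Tensor:
    """Eq. (2): symmetric InfoNCE over two augmented views with in-batch negatives."""
    e1, e2 = F.normalize(e1, dim=-1), F.normalize(e2, dim=-1)
    logits = e1 @ e2.t() / tau                                  # cos similarities / tau_1
    labels = torch.arange(e1.size(0), device=e1.device)
    # positives sit on the diagonal; every other user in the batch is a negative
    return F.cross_entropy(logits, labels) + F.cross_entropy(logits.t(), labels)

def top_m_users(e_all: torch.Tensor, m: int) -> torch.Tensor:
    """Section 4.1.4: retrieve the top-m most similar users by cosine similarity."""
    e = F.normalize(e_all, dim=-1)
    return (e @ e.t()).topk(m, dim=-1).indices                  # (num_users, m)
```

Note that under this formulation each user's own embedding ranks first among the top- $m$ , which matches the observation (Figure 8) that $m = 1$ introduces no collaborative information.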
+ +# 4.2 Document Retrieval + +After retrieving the top- $m$ users, we design a personalized retriever to retrieve the top- $k$ documents from each user's history, resulting in a total of $m \times k$ candidate documents $\mathcal{D}_{\text{retrieved}} = \{d_{i,j} | i \in \{1, \ldots, m\}, j \in \{1, \ldots, k\}\}$ . This section introduces how the retriever is designed and how it's trained to retrieve documents that better align with the requirements of personalized LLM generation. + +4.2.1 Retriever. First, we use a pre-trained dense retrieval model (such as BGE retriever [45]) to compute the semantic relevance + +![](images/92815eb6c2f51b4c9ef6ca63b04dd6a8f815dc0d32224bff35a685479dc7ee07.jpg) +Figure 4: The method of training the retriever and reranker using LLM feedback. + +between the query and the candidate documents: + +$$ +S _ {q, d} ^ {\text {r e t r i e v e r}} = \cos \left(\operatorname {E n c o d e r} _ {q} (q), \operatorname {E n c o d e r} _ {d} (d)\right), \tag {3} +$$ + +where $\mathrm{Encoder}_q(\cdot)\to \mathbb{R}^d$ and $\mathrm{Encoder}_d(\cdot)\rightarrow \mathbb{R}^d$ are the encoders for the query and the document in the retrieval model, respectively. Pre-trained retrieval models typically use $S_{q,d}^{\mathrm{retriever}}$ directly for retrieval. However, $S_{q,d}^{\mathrm{retriever}}$ only considers the semantic relevance between the query and the document. Since different users might input the same query but expect different outputs due to their varying preferences, we further account for user personalization by calculating the preference score of the user for the document as follows: + +$$ +S _ {u, d} ^ {\text {r e t r i e v e r}} = \cos \left(\mathrm {M L P} _ {1} \left(\mathbf {e} _ {u}\right), \operatorname {E n c o d e r} _ {d} (d)\right), \tag {4} +$$ + +where $\mathrm{MLP}_1: \mathbb{R}^d \to \mathbb{R}^d$ is a multi-layer perceptron that maps the user embedding to the space where the cosine similarity is computed. $\mathbf{e}_u$ is the embedding obtained in Section 4.1.1. The total score for retrieval is computed as follows: + +$$ +S _ {u, q, d} ^ {\text {r e t r i e v e r}} = (1 - \alpha) S _ {q, d} ^ {\text {r e t r i e v e r}} + \alpha S _ {u, d} ^ {\text {r e t r i e v e r}}, \tag {5} +$$ + +where $\alpha$ is a hyper-parameter that controls the weight of personalization. + +4.2.2 Training. Since the pre-trained dense retrieval model is not fine-tuned for our specific task, the retrieved results may not necessarily lead to LLM responses that better match the target output $y$ [25, 35]. However, there is no ground truth indicating which documents are better. Therefore, we evaluate the difference between the LLM's output and the target output $y$ , using this as a label to train the retrieval model. Figure 4 shows the process of training the retriever using LLM feedback. + +Specifically, we first use the pre-trained retrieval model to retrieve the top- $k$ documents from each of the $m$ users' histories based on $S_{q,d}^{\mathrm{retriever}}$ in Eq. (3), resulting in a total of $m \times k$ candidate documents. These documents are then concatenated with the query one by one and used as prompts for the LLM, producing $m \times k$ outputs: + +$$ +\{O _ {q, d _ {i, j}} = \mathrm {L L M} (q, d _ {i, j}) | i \in \{1, \dots , m \}, j \in \{1, \dots , k \} \}, +$$ + +where $\mathrm{LLM}(q, d_{i,j})$ represents the output generated by inputting the concatenated query $q$ and document $d_{i,j}$ into the LLM. 
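As an illustration of this feedback-collection step, the sketch below produces one score per candidate document. Here `llm_generate` and `eval_fn` are hypothetical stand-ins for the LLM call and the ROUGE-style metric, and the prompt template merely mirrors the style of the prompts in Table 4; none of these names come from the paper's code.

```python
from typing import Callable, List

def collect_llm_feedback(
    query: str,
    candidates: List[str],                 # the m*k retrieved documents d_{i,j}
    target: str,                           # the target output y
    llm_generate: Callable[[str], str],    # hypothetical LLM call: prompt -> output
    eval_fn: Callable[[str, str], float],  # eval(y, O_{q,d}), e.g. a ROUGE score
) -> List[float]:
    """Produce one feedback score eval(y, O_{q,d}) per candidate document."""
    scores = []
    for doc in candidates:
        # concatenate one candidate document with the query as the prompt
        prompt = f"The historical profiles are as follows: {doc}. {query}"
        output = llm_generate(prompt)      # O_{q,d} = LLM(q, d)
        scores.append(eval_fn(target, output))
    return scores
```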
Then, based on the quality of these outputs, we can calculate a distribution over these candidate documents as follows:

$$
p_{\text{LLM}}\left(d_{i,j} \mid q, y\right) = \frac{\exp\left(\mathrm{eval}\left(y, O_{q,d_{i,j}}\right)\right)}{\sum_{i=1}^{m} \sum_{j=1}^{k} \exp\left(\mathrm{eval}\left(y, O_{q,d_{i,j}}\right)\right)}, \tag{6}
$$

where $\mathrm{eval}(\cdot)$ measures the difference between the target output $y$ and the LLM's output, using metrics such as the ROUGE [24] score. A larger value returned by $\mathrm{eval}(\cdot)$ indicates a better generated result. Similarly, we can calculate the score distribution of the candidate documents under the retrieval model based on $S_{u,q,d}^{\text{retriever}}$ in Eq. (5):

$$
p_{\text{retriever}}\left(d_{i,j} \mid q, u\right) = \frac{\exp\left(S_{u,q,d_{i,j}}^{\text{retriever}}\right)}{\sum_{i=1}^{m} \sum_{j=1}^{k} \exp\left(S_{u,q,d_{i,j}}^{\text{retriever}}\right)}. \tag{7}
$$

We aim for the retrieval model to retrieve documents that lead to better LLM-generated results, which means making the distribution $p_{\mathrm{retriever}}(d \mid q, u)$ in Eq. (7) closer to the distribution $p_{\mathrm{LLM}}(d \mid q, y)$ in Eq. (6). Therefore, we compute the KL divergence between the two distributions as the loss to optimize the retriever:

$$
\mathcal{L}_{\text{retriever}} = \mathrm{KL}\left( p_{\text{retriever}}(d \mid q, u) \,\|\, p_{\text{LLM}}(d \mid q, y) \right). \tag{8}
$$

# 4.3 Document Rerank

After retrieving $\mathcal{D}_{\mathrm{retrieved}}$ through the retriever, in this section, we further refine the results by reranking $\mathcal{D}_{\mathrm{retrieved}}$ to obtain the final top-$k$ ranked results $\mathcal{D}_{\mathrm{reranked}} = \{d_i \mid i \in \{1, \dots, k\}\}$.

4.3.1 Reranker. We use a pre-trained cross-encoder (such as the BGE reranker [45]) to encode the query and document, obtaining the hidden state corresponding to the [CLS] token from the last layer:

$$
\mathbf{h}_{q,d} = \mathrm{CrossEncoder}(q, d), \tag{9}
$$

where $\mathbf{h}_{q,d} \in \mathbb{R}^d$. Similarly, when reranking, in addition to considering the semantic relevance between the query and the document, we also take into account the user's personalized preferences. However, since the cross-encoder does not encode documents separately, it cannot compute the cosine similarity between users and documents as in Eq. (4) to express the user preference score. Therefore, we directly concatenate the user embedding with the output of the cross-encoder to account for the influence of user preferences. The overall score used for reranking is calculated as follows:

$$
S_{u,q,d}^{\text{reranker}} = \mathrm{MLP}_3\left( \mathrm{CONCAT}\left( \mathbf{h}_{q,d}, \mathrm{MLP}_2(\mathbf{e}_u) \right) \right), \tag{10}
$$

where $\mathrm{MLP}_2: \mathbb{R}^d \to \mathbb{R}^d$ and $\mathrm{MLP}_3: \mathbb{R}^{2d} \to \mathbb{R}$ are two multi-layer perceptrons, and $\mathrm{CONCAT}(\cdot)$ denotes the concatenation operation.

4.3.2 Training. Similar to the retriever's training in Section 4.2.2, we also want the reranker to assign higher scores to the documents that lead to better LLM-generated results. Therefore, we train the reranker using a similar approach.
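Since the retriever (Eqs. (6)-(8)) and the reranker (Eqs. (11)-(12) below) share the same feedback-distillation objective, a single sketch covers both. Tensor names are assumptions for illustration; `eval_scores` would come from, e.g., ROUGE between $y$ and each $O_{q,d}$.

```python
import torch
import torch.nn.functional as F

def feedback_kl_loss(model_scores: torch.Tensor,
                     eval_scores: torch.Tensor) -> torch.Tensor:
    """model_scores: [m*k] S^retriever (Eq. 5) or S^reranker (Eq. 10);
    eval_scores:  [m*k] eval(y, O_{q,d}) for the same candidates."""
    log_p_model = F.log_softmax(model_scores, dim=-1)   # log of Eq. (7) / (11)
    with torch.no_grad():                               # LLM feedback is a fixed target
        log_p_llm = F.log_softmax(eval_scores, dim=-1)  # log of Eq. (6)
    # KL(p_model || p_llm), as in Eq. (8) / (12)
    return torch.sum(log_p_model.exp() * (log_p_model - log_p_llm))
```

Gradients flow only through `model_scores`, so the LLM stays frozen and only the retriever (or reranker) parameters are updated.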
We use the trained retrieval model from Section 4.2.2 to retrieve the top-$k$ documents from the history of each of the $m$ users, resulting in a total of $m \times k$ candidate documents. These documents are concatenated with the query $q$ and used as prompts for the LLM, producing $m \times k$ outputs. Similar to Eq. (6), we can obtain the distribution $p_{\mathrm{LLM}}(d \mid q, y)$ of these candidate documents. Based on

Table 1: Statistics of the datasets used in this paper.
| Dataset | LaMP-1 | LaMP-2 | LaMP-3 | LaMP-4 | LaMP-5 | LaMP-7 |
| --- | --- | --- | --- | --- | --- | --- |
| #Users | 6,542 | 929 | 20,000 | 1,643 | 14,682 | 13,437 |
| #Train | 6,542 | 5,073 | 20,000 | 12,500 | 14,682 | 13,437 |
| #Dev | 1,500 | 1,410 | 2,500 | 1,500 | 1,500 | 1,498 |
| #Test | 1,500 | 1,557 | 2,500 | 1,800 | 1,500 | 1,500 |
$S_{u,q,d}^{\mathrm{reranker}}$ in Eq. (10), we can also get the score distribution of the candidate documents under the reranker:

$$
p_{\text{reranker}}\left(d_{i,j} \mid q, u\right) = \frac{\exp\left(S_{u,q,d_{i,j}}^{\text{reranker}}\right)}{\sum_{i=1}^{m} \sum_{j=1}^{k} \exp\left(S_{u,q,d_{i,j}}^{\text{reranker}}\right)}. \tag{11}
$$

We compute the KL divergence between the distributions $p_{\mathrm{reranker}}(d \mid q, u)$ and $p_{\mathrm{LLM}}(d \mid q, y)$ as the loss to optimize the reranker:

$$
\mathcal{L}_{\text{reranker}} = \mathrm{KL}\left( p_{\text{reranker}}(d \mid q, u) \,\|\, p_{\text{LLM}}(d \mid q, y) \right). \tag{12}
$$

This loss allows the reranker to assign higher scores to documents that enable better personalized generation by the LLM.

# 4.4 Discussion

Computational Efficiency. CFRAG comprises three modules. The User Encoder is a lightweight, single-layer Transformer whose inputs are derived from a frozen BGE embedding (dimension 768), resulting in minimal parameter overhead. The retriever and reranker are comparable in size to BERT (approximately 100M parameters). Overall, the training cost is low due to the modest parameter sizes. During inference, user and document embeddings can be precomputed, so retrieval requires only similarity calculations, ensuring minimal computational cost. This efficiency enables our method to generalize quickly to new datasets.

# 5 Experiments

We conducted experiments to evaluate the performance of CFRAG. The source code is available.

# 5.1 Experimental Setup

5.1.1 Dataset. We conducted experiments on the Language Model Personalization (LaMP) [32] benchmark, which consists of seven personalized text generation tasks. We excluded LaMP-6 because its data is not publicly available. The remaining tasks are: LaMP-1 (Personalized Citation Identification); LaMP-2 (Personalized Movie Tagging); LaMP-3 (Personalized Product Rating); LaMP-4 (Personalized News Headline Generation); LaMP-5 (Personalized Scholarly Title Generation); and LaMP-7 (Personalized Tweet Paraphrasing). We used the time-based split provided by LaMP to divide the data into training, validation, and test sets. The statistics of these datasets are shown in Table 1.

5.1.2 Evaluation Metrics. Following previous works [31, 32], we report Accuracy and F1 score for LaMP-1 and LaMP-2, mean absolute error (MAE) and root mean squared error (RMSE) for LaMP-3, and ROUGE-1 and ROUGE-L [24] for LaMP-4, LaMP-5, and LaMP-7.

5.1.3 Baselines. In this work, we compare CFRAG with the following methods.

No Personalization: We directly input the user's query into the LLM without retrieving from user history, using this as the non-personalized baseline. We refer to this method as Zero Shot.

Personalized Baselines: We compare CFRAG with methods that personalize by retrieving from user history using different retrieval models, including: (1) Random, which selects $k$ items randomly from the user's history; (2) Recency, which selects the most recent $k$ items from the user's history; (3) BM25 [30], which retrieves the top-$k$ items from the user's history using BM25; (4) BGE [45], which retrieves the top-$k$ items from the user's history using the BGE retriever; (5) ROPG [31], which optimizes the dense retrieval model based on the results generated by the LLM.

5.1.4 Implementation Details. We conducted experiments on two LLMs: Llama3-8B-Instruct [1] and Qwen2-7B-Instruct [47].
In this paper, we do not fine-tune the LLM because fine-tuning is costly and could cause the LLM to retain user information, potentially compromising user privacy. To ensure a fair comparison, we use greedy search for text generation. The dense retrieval model used in all methods is bge-base-en-v1.5 [45]. The cross-encoder used for the reranker in Section 4.3.1 is bge-reranker-base [45]. All hyperparameters for the baselines are searched according to the settings in the original papers. The embedding dimension $d$ is set to 768. The number of retrieved documents $k$ is set to 5, and the number of retrieved users $m$ is tuned among $\{2, 3, 4, 5, 6\}$. The $\mathrm{Trm}(\cdot)$ encoder in Eq. (1) has 1 layer and 2 heads. The ratio hyper-parameters $\eta_c$, $\eta_m$, and $\eta_r$ used for data augmentation in Section 4.1.2 are set to 0.7, 0.3, and 0.3, respectively. The temperature parameter $\tau_1$ in Eq. (2) is tuned among $\{0.01, 0.1, 1\}$. The weight $\alpha$ in Eq. (5) is tuned within $[0.01, 1.0]$. The learning rate is tuned among $\{10^{-3}, 10^{-4}, 10^{-5}\}$. Adam [18] is used for optimization. The data input and output formats are provided in Appendix A.

# 5.2 Experimental Results

Experimental results are shown in Table 2. From the results, we can find that:

- Firstly, compared to existing methods, CFRAG achieves the best results across all six datasets in the LaMP benchmark. This demonstrates the effectiveness of introducing collaborative information between users into RAG and of using LLM feedback to tune the retriever and reranker so that they retrieve documents that support personalized LLM generation.
- Secondly, we observe that even randomly selecting user history outperforms the zero-shot method without any user history. This highlights the importance of incorporating user history to reflect user preferences for personalized generation. Additionally, retrieval methods perform better than simply selecting the most recent user history, underscoring the importance of retrieval.
- Thirdly, we also observe that, in most cases, RAG and ROPG methods using dense retrieval models outperform BM25. Additionally, CFRAG, which fine-tunes the retriever based on LLM feedback, achieves better results. This shows, on the one hand, that the better the retriever, the better the generation results, and on the other

Table 2: Comparison of the performance of CFRAG with other approaches on the LaMP benchmark. $\uparrow$ indicates that a higher value for the corresponding metric is better, while $\downarrow$ indicates that a lower value is better. The best and the second-best methods are highlighted in bold and underlined fonts, respectively. "*" indicates improvements over the second-best methods are statistically significant ($t$-test, $p$-value $< 0.05$).
| LLMs | Retrievers | LaMP-1 Accuracy ↑ | LaMP-1 F1 ↑ | LaMP-2 Accuracy ↑ | LaMP-2 F1 ↑ | LaMP-3 MAE ↓ | LaMP-3 RMSE ↓ | LaMP-4 ROUGE-1 ↑ | LaMP-4 ROUGE-L ↑ | LaMP-5 ROUGE-1 ↑ | LaMP-5 ROUGE-L ↑ | LaMP-7 ROUGE-1 ↑ | LaMP-7 ROUGE-L ↑ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Llama3 | Zero Shot | 0.4993 | 0.2497 | 0.2993 | 0.0200 | 0.5024 | 0.7904 | 0.1406 | 0.1228 | 0.4417 | 0.3650 | 0.3079 | 0.2593 |
| Llama3 | Random | 0.5740 | 0.2870 | 0.3929 | 0.0262 | 0.4104 | 0.7833 | 0.1787 | 0.1571 | 0.4533 | 0.3875 | 0.3137 | 0.2508 |
| Llama3 | Recency | 0.6040 | 0.3020 | 0.3993 | 0.0266 | 0.3980 | 0.7491 | 0.1856 | 0.1650 | 0.4573 | 0.3928 | 0.3325 | 0.2686 |
| Llama3 | BM25 [30] | 0.6240 | 0.3120 | 0.4255 | 0.0284 | 0.4060 | 0.7666 | 0.1803 | 0.1591 | 0.4637 | 0.3978 | 0.3449 | 0.2780 |
| Llama3 | BGE [45] | 0.6327 | 0.3163 | 0.4574 | 0.0305 | 0.3528 | 0.6969 | 0.1811 | 0.1611 | 0.4638 | 0.3958 | 0.3391 | 0.2742 |
| Llama3 | ROPG [31] | 0.6440 | 0.3220 | 0.4681 | 0.0312 | 0.3456 | 0.6922 | 0.1838 | 0.1634 | 0.4638 | 0.3956 | 0.3530 | 0.2881 |
| Llama3 | CFRAG | 0.6533* | 0.3267* | 0.5340* | 0.0356* | 0.2812* | 0.5997* | 0.1957* | 0.1745* | 0.4810* | 0.4153* | 0.3752* | 0.3055* |
| Qwen2 | Zero Shot | 0.5000 | 0.2500 | 0.2908 | 0.0194 | 0.4444 | 0.7805 | 0.1264 | 0.1081 | 0.4144 | 0.3468 | 0.3972 | 0.3229 |
| Qwen2 | Random | 0.5633 | 0.2817 | 0.3284 | 0.0219 | 0.4000 | 0.7621 | 0.1581 | 0.1377 | 0.4580 | 0.3921 | 0.4291 | 0.3564 |
| Qwen2 | Recency | 0.5773 | 0.2887 | 0.3326 | 0.0222 | 0.3912 | 0.7563 | 0.1581 | 0.1369 | 0.4562 | 0.3913 | 0.4247 | 0.3525 |
| Qwen2 | BM25 [30] | 0.5987 | 0.2993 | 0.3532 | 0.0235 | 0.4228 | 0.8027 | 0.1580 | 0.1374 | 0.4613 | 0.3950 | 0.4290 | 0.3570 |
| Qwen2 | BGE [45] | 0.6080 | 0.3040 | 0.3674 | 0.0245 | 0.3696 | 0.7211 | 0.1613 | 0.1398 | 0.4571 | 0.3910 | 0.4347 | 0.3605 |
| Qwen2 | ROPG [31] | 0.6093 | 0.3047 | 0.3830 | 0.0255 | 0.3672 | 0.7332 | 0.1617 | 0.1401 | 0.4600 | 0.3946 | 0.4345 | 0.3610 |
| Qwen2 | CFRAG | 0.6133 | 0.3067 | 0.3957* | 0.0264 | 0.3536* | 0.7071* | 0.1621 | 0.1412 | 0.4703* | 0.4029* | 0.4425* | 0.3708* |
+ +Table 3: Ablation Study of CFRAG on LaMP based on Llama3. "MEAN" represents using the average of user history document embeddings as the user embedding. "w/o" indicates the corresponding module in CFRAG is removed. + +
| # | Variant | LaMP-1 Accuracy ↑ | LaMP-1 F1 ↑ | LaMP-2 Accuracy ↑ | LaMP-2 F1 ↑ | LaMP-3 MAE ↓ | LaMP-3 RMSE ↓ | LaMP-4 ROUGE-1 ↑ | LaMP-4 ROUGE-L ↑ | LaMP-5 ROUGE-1 ↑ | LaMP-5 ROUGE-L ↑ | LaMP-7 ROUGE-1 ↑ | LaMP-7 ROUGE-L ↑ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| (0) | CFRAG | 0.6533 | 0.3267 | 0.5340 | 0.0356 | 0.2812 | 0.5997 | 0.1957 | 0.1745 | 0.4810 | 0.4153 | 0.3752 | 0.3055 |
| (1) | w/o User Retrieval | 0.6400 | 0.3200 | 0.4936 | 0.0329 | 0.3444 | 0.6925 | 0.1914 | 0.1689 | 0.4642 | 0.3963 | 0.3566 | 0.2903 |
| (2) | User Retrieval (MEAN) | 0.6420 | 0.3210 | 0.5064 | 0.0338 | 0.3412 | 0.6867 | 0.1847 | 0.1639 | 0.4779 | 0.4113 | 0.3722 | 0.3022 |
| (3) | w/o Retriever Tuning | 0.6453 | 0.3227 | 0.4979 | 0.0332 | 0.2852 | 0.6070 | 0.1916 | 0.1704 | 0.4742 | 0.4048 | 0.3599 | 0.2940 |
| (4) | w/o $S_{u,d}^{\text{retriever}}$ in Eq. (5) | 0.6333 | 0.3167 | 0.5113 | 0.0341 | 0.3324 | 0.6861 | 0.1895 | 0.1696 | 0.4750 | 0.4088 | 0.3732 | 0.3039 |
| (5) | w/o Reranker Tuning | 0.6307 | 0.3153 | 0.4695 | 0.0313 | 0.3696 | 0.7392 | 0.1766 | 0.1550 | 0.4714 | 0.4068 | 0.3432 | 0.2775 |
| (6) | w/o $\mathbf{e}_u$ in Eq. (10) | 0.6313 | 0.3157 | 0.4993 | 0.0333 | 0.3420 | 0.6925 | 0.1887 | 0.1672 | 0.4772 | 0.4123 | 0.3731 | 0.3030 |
hand, that fine-tuning the retriever based on LLM feedback, so that it retrieves documents meeting the personalized generation needs of the LLM, is crucial.

# 5.3 Ablation Study

We conducted an ablation study to investigate the effectiveness of the different modules in CFRAG, as shown in Table 3. CFRAG consists of three modules: User Retrieval, Document Retrieval, and Document Rerank. We removed these modules from CFRAG one by one to verify the effectiveness of each.

5.3.1 User Retrieval. First, we validated the effectiveness of introducing collaborative information by retrieving similar users, as shown in row (1) of Table 3. Without retrieving similar users and only retrieving from the current user's history, the performance is worse than that of CFRAG, highlighting the importance of collaborative information.

We also validated the effectiveness of training user embeddings using contrastive learning. For comparison, we directly averaged the document embeddings from the user's history to create user embeddings for retrieval, as shown in row (2) of Table 3. CFRAG, which uses user embeddings trained with contrastive learning, achieves better results. This is because contrastive learning constructs user similarity labels through data augmentation and uses the InfoNCE loss to help the embeddings learn which users are similar, whereas mean pooling alone cannot capture user similarity.

![](images/94633987414c00f738c3423cbe8c6e65847b434df8ed4ae5565aeb3a005e42e8.jpg)
(a) LaMP-1

![](images/79735191f069bd3fe577f6f68843cfd442708060861370a8d9a6cbcee6d995b3.jpg)
(b) LaMP-5
Figure 5: Results of using different methods to select users for introducing collaborative information. "random" indicates randomly selecting $m$ users; "top-$(m-2m)$" represents selecting users whose similarity to the current user ranks between $m$ and $2m$; "top-$m$" indicates selecting the most similar $m$ users.

5.3.2 Document Retrieval. We also validated the effectiveness of the personalized retriever we designed, as shown in Table 3, rows (3) and (4). First, row (3) shows that without fine-tuning based on LLM feedback, using a pre-trained dense retrieval model leads to worse performance. This indicates that retrieval cannot be based solely on semantic relevance; ensuring that the retrieved documents support personalized LLM generation is crucial. Additionally, we analyzed the impact of removing $S_{u,d}^{\mathrm{retriever}}$ in Eq. (4) and only using $S_{q,d}^{\mathrm{retriever}}$ in Eq. (3) for retrieval, as indicated in row (4). The results decreased, demonstrating that users' personalized preferences should also be considered during retrieval, rather than solely focusing on the semantic relevance between the query and documents.

![](images/86faea8a327cef5e2b9ac66cf7e276f3ee4ebff4fccc9a5f5db5d20515b61ee6.jpg)
(a) LaMP-1

![](images/e096674f0f80370de56434a59eb14a7c664e0ab43e4a24769d974004128917ef.jpg)
(b) LaMP-5
Figure 6: Results using different retrievers and rerankers. "BM25" indicates using BM25 as both the retriever and reranker, while "w/o Tuning" refers to using pre-trained retrievers and rerankers without LLM feedback fine-tuning.

![](images/0fe4c17f7d3b0064ee98d9689518a2b9f9a84baea0031c9d564a3783a1b2fd6b.jpg)
(a) LaMP-1

![](images/20182a7db51c3fa3024198281989434352d72122a26392fd7dce39da64ecec9d.jpg)
(b) LaMP-5
Figure 7: Performance under different numbers of retrieved documents from the current user $u$'s history in the top-$k$ documents.

5.3.3 Document Rerank. We also validated the effectiveness of the personalized reranker we designed, as shown in Table 3, rows (5) and (6). First, row (5) shows that using a pre-trained reranker leads to worse results, highlighting the importance of fine-tuning based on LLM feedback. We also observed the effect of removing $\mathbf{e}_u$ from Eq. (10) and only using $\mathbf{h}_{q,d}$ to calculate $S_{q,d}^{\text{reranker}}$ for ranking, as indicated in row (6). The results decreased in this case as well, highlighting the importance of considering users' personalized preferences in the reranker.

# 5.4 Experimental Analysis

As mentioned in Section 1, adapting collaborative filtering to personalized RAG faces two challenges. Challenge 1: how to introduce collaborative information? Challenge 2: how to retrieve documents that support personalized LLM generation? In this section, we conduct experimental analysis to further demonstrate the effectiveness of our method in addressing these two challenges. Additionally, we provide further analysis of the results of CFRAG and the impact of hyper-parameters. Due to space limitations, we conduct this analysis on the LaMP-1 and LaMP-5 datasets.

5.4.1 Effectiveness of User Retrieval using Contrastive Learning (Challenge 1). As described in Section 1, to address Challenge 1, we train user embeddings using contrastive learning to retrieve the top-$m$ most similar users for introducing collaborative information. To validate the effectiveness of this approach, we compared it with randomly selecting $m$ users and with selecting users ranked from top-$m$ to $2m$, as shown in Figure 5. First, randomly selecting users yields the worst performance, indicating that collaborative information cannot be introduced indiscriminately. Secondly, the results show that retrieving users from the range of top-$m$ to $2m$ performs worse than using the top-$m$ users, suggesting that information from users who are more similar to the current user $u$ is more important. These findings highlight the importance of retrieving the most similar top-$m$ users.

![](images/9c481f693eb54eb4efbf0d5c0f2af5aa8f4d150f073794b47f3ad1f2fb237cf5.jpg)
(a) LaMP-1

![](images/acb69f3db4e448b0e25bc42ebbf3669d4e4b97072a06599b923f91b4cb5074dc.jpg)
(b) LaMP-5
Figure 8: Performance under different numbers of retrieved users. The performance is the worst when $m = 1$, since no collaborative information is introduced.

![](images/83cde8aa2ad4ce4f601933f54a812e13703cb17f9d80156a716ccbf53ac36f6e.jpg)
(a) LaMP-1

![](images/208072a2dd51268962130922f22ad7a74e36f3ec865e34dc65a393a56cbe2a6c.jpg)
(b) LaMP-5
Figure 9: Performance under different numbers of retrieved documents per user.

5.4.2 Effectiveness of Document Retrieval using LLM Feedback (Challenge 2). As mentioned in Section 1, to address Challenge 2, we fine-tune the retriever and reranker using feedback on the content generated by the LLM, enabling them to retrieve documents that better meet personalized LLM generation needs. To validate its effectiveness, we compared the results with those of retrievers and rerankers without LLM feedback fine-tuning, as well as with BM25 used as the retriever and reranker, as shown in Figure 6.
It can be observed that CFRAG performs the best, highlighting the importance of fine-tuning with LLM feedback rather than relying solely on semantic relevance.

5.4.3 Impact of the Number of Documents from the Current User. To further validate that CFRAG enhances personalization by incorporating collaborative information, we examined how the number of documents from the current user in the final top-$k$ documents affects the results, as shown in Figure 7. We varied the number of documents retrieved from the current user's history in the top-$k$ documents from 0 to 5, with the remaining documents retrieved from similar users' histories. The results indicate that retrieving only from the current user's history leads to poor performance, while appropriately retrieving documents from similar users' histories significantly improves the results. This verifies the importance of incorporating collaborative information.

Table 4: The format of input, output, and user history for different datasets in the LaMP [32] benchmark. In the input, $\{history_i\}$ will be replaced by the retrieved $i$-th history, and each history is represented as shown in the "User History" column. The other italicized text in the input is replaced with the user's input. For text generation tasks, to ensure that the LLM does not generate irrelevant information, we instruct the LLM in the input to generate in JSON format, and then we extract the LLM's prediction from the JSON-formatted output.
| Task | Input | Output | User History |
| --- | --- | --- | --- |
| LaMP-1 | The historical profiles are as follows: {history1} ... {historyk}. Based on the historical profiles provided, please choose one of the following two references that is more relevant to the user's input title: [1] {reference1}; [2] {reference2}. Please just answer with "[1]" or "[2]" without explanation. "title": {title}. | [1] | "title": {title}; "abstract": {abstract} |
| LaMP-2 | The historical profiles are as follows: {history1} ... {historyk}. Based on the historical profiles provided, please select the tag from [sci-fi, based on a book, comedy ...] that is most relevant to the user's input description. Please just answer with the tag name without explanation. "description": {description}; "tag": | comedy | "description": {description}; "tag": {tag} |
| LaMP-3 | The historical profiles are as follows: {history1} ... {historyk}. Based on the historical profiles provided, what is the score of the following review on a scale of 1 to 5? Just answer with 1, 2, 3, 4, or 5 without further explanation. "review": {review}; "score": | 5 | "review": {review}; "score": {score} |
| LaMP-4 | The historical profiles are as follows: {history1} ... {historyk}. Based on the historical profiles provided, please generate a title for the given user's input text. Please generate it in the following format: {"title": "generated title"} without explanation, and use only English. "text": {text}; "title": | {"title": Finding Happiness After Divorce - It Can Happen} | "text": {text}; "title": {title} |
| LaMP-5 | The historical profiles are as follows: {history1} ... {historyk}. Based on the historical profiles provided, please generate a title for the given user's input abstract. Please generate it in the following format: {"title": "generated title"} without explanation, and use only English. "abstract": {abstract}; "title": | {"title": Link-Reliability Based Two-Hop Routing for Wireless Sensor Networks.} | "abstract": {abstract}; "title": {title} |
| LaMP-7 | The historical profiles are as follows: {history1} ... {historyk}. Based on the style pattern of the historical tweets provided, please paraphrase the user's input tweet without any explanation before or after it. Please generate it in the following format: {"tweet": "generated tweet"} without explanation, and use only English. "tweet": {tweet}. | {"tweet": lilxcutiesworld the danny picture is GOOD!! I really like it.} | "tweet": {tweet} |
5.4.4 Impact of the Number of Retrieved Users. Since we enhance personalized text generation by introducing collaborative filtering, we further explored how much collaborative information to introduce, specifically the impact of the number of retrieved users on the results, as shown in Figure 8. In LaMP-1, retrieving too few or too many users leads to poorer performance, with the best results at 4 users. In LaMP-5, the performance improves as the number of users increases. This highlights the value of introducing collaborative filtering, but also indicates that introducing too much collaborative information can degrade effectiveness.

5.4.5 Impact of the Number of Retrieved Documents. We also analyzed the impact of the number of retrieved documents, $k$, on the results, as shown in Figure 9. As the number of retrieved documents increases, performance improves, indicating the importance of retrieving user history to reflect user preferences for enhancing LLM-generated results. Since more documents lead to longer prompts and slower LLM generation, we chose $k = 5$ for our experiments.

# 6 Conclusion

In this paper, we propose CFRAG, which adapts collaborative filtering to RAG to personalize LLMs. To introduce collaborative information without explicit user similarity labels and to retrieve documents that support personalized LLM generation, we first train user embeddings through contrastive learning to retrieve similar users. Then, we design a personalized retriever and reranker that consider user preferences during retrieval and fine-tune them using LLM feedback. The results on the Language Model Personalization (LaMP) benchmark validate the effectiveness of CFRAG. The experimental analysis also confirms the effectiveness of each module within CFRAG.

# A Appendix: Prompts

We provide the detailed formats of the inputs, outputs, and user histories for the LLM across the different datasets, as shown in Table 4.

# References

[1] AI@Meta. 2024. Llama 3 Model Card. (2024). https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md
[2] Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. [n.d.]. Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection. In The Twelfth International Conference on Learning Representations.
[3] Sebastian Borgeaud, Arthur Mensch, et al. 2022. Improving language models by retrieving from trillions of tokens. In International Conference on Machine Learning. PMLR, 2206-2240.
[4] Jin Chen, Zheng Liu, et al. 2024. When large language models meet personalization: Perspectives of challenges and opportunities. World Wide Web 27, 4 (2024), 42.
[5] Sunhao Dai, Ninglu Shao, et al. 2023. Uncovering ChatGPT's capabilities in recommender systems. In Proceedings of the 17th ACM Conference on Recommender Systems. 1126-1132.
[6] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers).
[7] Wenqi Fan, Yujuan Ding, Liangbo Ning, Shijie Wang, Hengyun Li, Dawei Yin, Tat-Seng Chua, and Qing Li. 2024. A Survey on RAG Meeting LLMs: Towards Retrieval-Augmented Large Language Models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6491-6501.
+[8] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023). +[9] Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning. PMLR, 3929-3938. +[10] Xiangnan He, Kuan Deng, Xiang Wang, Yan Li, Yongdong Zhang, and Meng Wang. 2020. Lightgcn: Simplifying and powering graph convolution network for recommendation. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval. 639-648. +[11] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173-182. +[12] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. [n.d.]. LoRA: Low-Rank Adaptation of Large Language Models. In International Conference on Learning Representations. +[13] Gautier Izacard and Edouard Grave. 2021. Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume. 874-880. +[14] Gautier Izacard, Patrick Lewis, et al. 2022. Few-shot learning with retrieval augmented language models. arXiv preprint arXiv:2208.03299 1, 2 (2022), 4. +[15] Ashish Jaiswal, Ashwin Ramesh Babu, Mohammad Zaki Zadeh, Debapriya Banerjee, and Fillia Makedon. 2020. A survey on contrastive self-supervised learning. Technologies 9, 1 (2020), 2. +[16] Joel Jang, Seungone Kim, et al. 2023. Personalized soups: Personalized large language model alignment via post-hoc parameter merging. arXiv preprint arXiv:2310.11564 (2023). +[17] Nikhil Kandpal, Haikang Deng, Adam Roberts, Eric Wallace, and Colin Raffel. 2023. Large language models struggle to learn long-tail knowledge. In International Conference on Machine Learning. PMLR, 15696-15707. +[18] Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014). +[19] Yehuda Koren, Robert Bell, and Chris Volinsky. 2009. Matrix factorization techniques for recommender systems. Computer 42, 8 (2009), 30-37. +[20] Patrick Lewis, Ethan Perez, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems 33 (2020), 9459-9474. +[21] Cheng Li, Mingyang Zhang, Qiaozhu Mei, Yaqing Wang, Spurthi Amba Hombaiah, Yi Liang, and Michael Bendersky. 2023. Teach LLMs to Personalize-An Approach inspired by Writing Education. arXiv preprint arXiv:2308.07968 (2023). +[22] Junyi Li, Tianyi Tang, Wayne Xin Zhao, Jian-Yun Nie, and Ji-Rong Wen. 2024. Pre-trained language models for text generation: A survey. Comput. Surveys 56, 9 (2024), 1-39. +[23] Xinyu Li, Zachary C Lipton, and Liu Leqi. 2024. Personalized language modeling from personalized human feedback. arXiv preprint arXiv:2402.05133 (2024). +[24] Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out. 74-81. +[25] Xi Victoria Lin, Xilun Chen, Mingda Chen, Weijia Shi, Maria Lomeli, Richard James, Pedro Rodriguez, Jacob Kahn, Gergely Szilvasy, Mike Lewis, et al. [n.d.]. RA-DIT: Retrieval-Augmented Dual Instruction Tuning. 
In The Twelfth International Conference on Learning Representations. +[26] Yinhan Liu. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019). + +[27] Sheshera Mysore, Zhuoran Lu, et al. 2023. Pearl: Personalizing large language model writing assistants with generation-calibrated retrievers. arXiv preprint arXiv:2311.09180 (2023). +[28] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. 2018. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018). +[29] Chris Richardson, Yao Zhang, Kellen Gillespie, Sudipta Kar, Arshdeep Singh, Zeynab Raeesy, Omar Zia Khan, and Abhinav Sethy. 2023. Integrating summarization and retrieval for enhanced personalization via large language models. arXiv preprint arXiv:2310.20081 (2023). +[30] Stephen E Robertson, Steve Walker, Susan Jones, Micheline M Hancock-Beaulieu, Mike Gatford, et al. 1995. Okapi at TREC-3. Nist Special Publication Sp 109 (1995), 109. +[31] Alireza Salemi, Surya Kallumadi, and Hamed Zamani. 2024. Optimization methods for personalizing large language models through retrieval augmentation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 752-762. +[32] Alireza Salemi, Sheshera Mysore, Michael Bendersky, and Hamed Zamani. 2023. Lamp: When large language models meet personalization. arXiv preprint arXiv:2304.11406 (2023). +[33] Chenglei Shen, Xiao Zhang, Teng Shi, Changshuo Zhang, Guofu Xie, and Jun Xu. 2024. A survey of controllable learning: Methods and applications in information retrieval. arXiv preprint arXiv:2407.06083 (2024). +[34] Teng Shi, Zihua Si, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Dewei Leng, Yanan Niu, and Yang Song. 2024. UniSAR: Modeling User Transition Behaviors between Search and Recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1029-1039. +[35] Weijia Shi, Sewon Min, Michihiro Yasunaga, Minjoon Seo, Richard James, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. 2024. REPLUG: Retrieval-Augmented Black-Box Language Models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 8364-8377. +[36] Zhaoxuan Tan, Zheyuan Liu, and Meng Jiang. 2024. Personalized Pieces: Efficient Personalized Large Language Models through Collaborative Efforts. arXiv preprint arXiv:2406.10471 (2024). +[37] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2024. Democratizing Large Language Models via Personalized Parameter-Efficient Fine-tuning. arXiv:2402.04401 [cs.CL] https://arxiv.org/abs/2402.04401 +[38] Jiakai Tang, Sunhao Dai, Teng Shi, Jun Xu, Xu Chen, Wen Chen, Wu Jian, and Yuning Jiang. 2025. Think Before Recommend: Unleashing the Latent Reasoning Power for Sequential Recommendation. arXiv:2503.22675 [cs.IR] https://arxiv.org/abs/2503.22675 +[39] A Vaswani. 2017. Attention is all you need. Advances in Neural Information Processing Systems (2017). +[40] Xiang Wang, Xiangnan He, Meng Wang, Fuli Feng, and Tat-Seng Chua. 2019. Neural graph collaborative filtering. In Proceedings of the 42nd international ACM SIGIR conference on Research and development in Information Retrieval. 165-174. +[41] Likang Wu, Zhi Zheng, Zhaopeng Qiu, Hao Wang, Hongchao Gu, Tingjia Shen, Chuan Qin, Chen Zhu, Hengshu Zhu, Qi Liu, et al. 2024. 
A survey on large language models for recommendation. World Wide Web 27, 5 (2024), 60. +[42] Xinghao Wu, Xuefeng Liu, Jianwei Niu, Haolin Wang, Shaojie Tang, and Guogang Zhu. 2024. FedLoRA: When Personalized Federated Learning Meets Low-Rank Adaptation. (2024). +[43] Zeqiu Wu, Yushi Hu, Weijia Shi, Nouha Dziri, Alane Suhr, Prithviraj Ammanabrolu, Noah A Smith, Mari Ostendorf, and Hannaneh Hajishirzi. 2024. Fine-grained human feedback gives better rewards for language model training. Advances in Neural Information Processing Systems 36 (2024). +[44] Zhuofeng Wu, Sinong Wang, Jiatao Gu, Madian Khabsa, Fei Sun, and Hao Ma. 2020. Clear: Contrastive learning for sentence representation. arXiv preprint arXiv:2012.15466 (2020). +[45] Shitao Xiao, Zheng Liu, Peitian Zhang, and Niklas Muennighoff. 2023. C-Pack: Packaged Resources To Advance General Chinese Embedding. arXiv:2309.07597 [cs.CL] +[46] Hong-Jian Xue, Xinyu Dai, Jianbing Zhang, Shujian Huang, and Jiajun Chen. 2017. Deep matrix factorization models for recommender systems.. In IJCAI, Vol. 17. Melbourne, Australia, 3203-3209. +[47] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Cheng-peng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671 (2024). +[48] Changshuo Zhang, Teng Shi, Xiao Zhang, Qi Liu, Ruobing Xie, Jun Xu, and Ji-Rong Wen. 2024. Modeling Domain and Feedback Transitions for Cross-Domain Sequential Recommendation. arXiv preprint arXiv:2408.08209 (2024). +[49] Changshuo Zhang, Teng Shi, Xiao Zhang, Yanping Zheng, Ruobing Xie, Qi Liu, Jun Xu, and Ji-Rong Wen. 2024. QAGCF: Graph Collaborative Filtering for Q&A Recommendation. arXiv preprint arXiv:2406.04828 (2024). +[50] Changshuo Zhang, Xiao Zhang, Teng Shi, Jun Xu, and Ji-Rong Wen. 2025. Test-Time Alignment for Tracking User Interest Shifts in Sequential Recommendation. arXiv:2504.01489 [cs.IR] https://arxiv.org/abs/2504.01489 + +[51] Kepu Zhang, Teng Shi, Sunhao Dai, Xiao Zhang, Yinfeng Li, Jing Lu, Xiaoxue Zang, Yang Song, and Jun Xu. 2024. SAQRec: Aligning Recommender Systems to User Satisfaction via Questionnaire Feedback. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3165-3175. +[52] Xiao Zhang, Teng Shi, Jun Xu, Zhenhua Dong, and Ji-Rong Wen. 2024. Model-Agnostic Causal Embedding Learning for Counterfactually Group-Fair Recommendation. IEEE Transactions on Knowledge and Data Engineering (2024). +[53] Yue Zhang, Yafu Li, Leyang Cui, Deng Cai, Lemao Liu, Tingchen Fu, Xinting Huang, Enbo Zhao, Yu Zhang, Yulong Chen, et al. 2023. Siren's song in the AI ocean: a survey on hallucination in large language models. arXiv preprint arXiv:2309.01219 (2023). + +[54] Wayne Xin Zhao, Jing Liu, Ruiyang Ren, and Ji-Rong Wen. 2024. Dense text retrieval based on pretrained language models: A survey. ACM Transactions on Information Systems 42, 4 (2024), 1-60. +[55] Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023). +[56] Yutao Zhu, Huaying Yuan, Shuting Wang, Jiongnan Liu, Wenhan Liu, Chenlong Deng, Haonan Chen, Zhicheng Dou, and Ji-Rong Wen. 2023. Large language models for information retrieval: A survey. arXiv preprint arXiv:2308.07107 (2023). +[57] Yuchen Zhuang, Haotian Sun, Yue Yu, Qifan Wang, Chao Zhang, and Bo Dai. 2024. HYDRA: Model Factorization Framework for Black-Box LLM Personalization. 
arXiv preprint arXiv:2406.02888 (2024). \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05731/images/0fe4c17f7d3b0064ee98d9689518a2b9f9a84baea0031c9d564a3783a1b2fd6b.jpg b/data/2025/2504_05xxx/2504.05731/images/0fe4c17f7d3b0064ee98d9689518a2b9f9a84baea0031c9d564a3783a1b2fd6b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb2f3e04396201d1ac31af717fe94c35933b9d28 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/0fe4c17f7d3b0064ee98d9689518a2b9f9a84baea0031c9d564a3783a1b2fd6b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fd1c5b5f9da3fc6c8ae41307334f5baadffb7c95871df63a15b9536f90a707c +size 11210 diff --git a/data/2025/2504_05xxx/2504.05731/images/1770e817342ffc39a1149bbb6148e83683024c61a30a0f540c0cbacac3546bc7.jpg b/data/2025/2504_05xxx/2504.05731/images/1770e817342ffc39a1149bbb6148e83683024c61a30a0f540c0cbacac3546bc7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea794584557d7a9c6f6996da28dbc566140f10d2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/1770e817342ffc39a1149bbb6148e83683024c61a30a0f540c0cbacac3546bc7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a03578fd08cf32a9f4ed4084a7ca529350c6fea845137d2f1bc40058f08401c +size 5609 diff --git a/data/2025/2504_05xxx/2504.05731/images/1b4b2debe6472ddf2afda8745e85c61d5617ebc984a7385407045378024caa4a.jpg b/data/2025/2504_05xxx/2504.05731/images/1b4b2debe6472ddf2afda8745e85c61d5617ebc984a7385407045378024caa4a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc7346afaf9d25b739762efa4e6fdff712f84801 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/1b4b2debe6472ddf2afda8745e85c61d5617ebc984a7385407045378024caa4a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78879fab6889e7b21cfc3a338d5cfcf462416c54db67a4e223c46b89febb49e7 +size 5653 diff --git a/data/2025/2504_05xxx/2504.05731/images/20182a7db51c3fa3024198281989434352d72122a26392fd7dce39da64ecec9d.jpg b/data/2025/2504_05xxx/2504.05731/images/20182a7db51c3fa3024198281989434352d72122a26392fd7dce39da64ecec9d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..395cf1242e5cb21f5f7dff5354970b1c444fbbf0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/20182a7db51c3fa3024198281989434352d72122a26392fd7dce39da64ecec9d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29fc94098fd80f6c0a07f74478a1c730d6d141b217d71adf954cabde8826183b +size 11735 diff --git a/data/2025/2504_05xxx/2504.05731/images/208072a2dd51268962130922f22ad7a74e36f3ec865e34dc65a393a56cbe2a6c.jpg b/data/2025/2504_05xxx/2504.05731/images/208072a2dd51268962130922f22ad7a74e36f3ec865e34dc65a393a56cbe2a6c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..44c8861b38b9b777077cfcc145b7fc72084527de --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/208072a2dd51268962130922f22ad7a74e36f3ec865e34dc65a393a56cbe2a6c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a242e2ca9efcb6d8e85a2f36b5e11b72ad1b88b97ec64ce37b92ba5ceab02594 +size 11029 diff --git a/data/2025/2504_05xxx/2504.05731/images/286390de4f0085e9bc40dde8cd842fc967aed23ac33c9c8b2fef3cd5ce5de750.jpg b/data/2025/2504_05xxx/2504.05731/images/286390de4f0085e9bc40dde8cd842fc967aed23ac33c9c8b2fef3cd5ce5de750.jpg new file mode 100644 index 0000000000000000000000000000000000000000..586cb9a156b1d0a2b7f068975bccb75361df9888 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05731/images/286390de4f0085e9bc40dde8cd842fc967aed23ac33c9c8b2fef3cd5ce5de750.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:496b8936eab3301919f825ff23f57bd1a4f6d5223333b83ce5ade23c4eaac7a6 +size 4956 diff --git a/data/2025/2504_05xxx/2504.05731/images/2949233b012ca5dc6bde7e95580b620544ae0db053562b8a4fa23243c5566927.jpg b/data/2025/2504_05xxx/2504.05731/images/2949233b012ca5dc6bde7e95580b620544ae0db053562b8a4fa23243c5566927.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5bda3507dca67b6287bb54f485f799679279fb5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/2949233b012ca5dc6bde7e95580b620544ae0db053562b8a4fa23243c5566927.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ff7a02847a7bc5b28d9f31b8ce5ae55daefaca7ffff36ab098dacac636f5b32 +size 24835 diff --git a/data/2025/2504_05xxx/2504.05731/images/2b38bacf7f2f45a0608c219d86ab323366512e80026a8f5314c0af3b68f98d9f.jpg b/data/2025/2504_05xxx/2504.05731/images/2b38bacf7f2f45a0608c219d86ab323366512e80026a8f5314c0af3b68f98d9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b0ef8d086914ec827de631ced0c687809d5255a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/2b38bacf7f2f45a0608c219d86ab323366512e80026a8f5314c0af3b68f98d9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e05a538b789c69de93fa504dcf1c3bc9bfbcad9e772d4d60df5a1c4a9e6af73 +size 6257 diff --git a/data/2025/2504_05xxx/2504.05731/images/321ed29b3fe1bd2bf8b65455f1b9c4e37c82123640abf0223a5bca357a1a3ec4.jpg b/data/2025/2504_05xxx/2504.05731/images/321ed29b3fe1bd2bf8b65455f1b9c4e37c82123640abf0223a5bca357a1a3ec4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ef9c5949195064055e808bc9cc8053b6ef60aad --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/321ed29b3fe1bd2bf8b65455f1b9c4e37c82123640abf0223a5bca357a1a3ec4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ad6740d32a796934aab711abb45d44f24bcf66adf17aed0bd0f30afc6c3a85f +size 3412 diff --git a/data/2025/2504_05xxx/2504.05731/images/34f15c2d425a0ba3e0b25f67abb59aecc0f185aae42a557093eae9d488fd5f26.jpg b/data/2025/2504_05xxx/2504.05731/images/34f15c2d425a0ba3e0b25f67abb59aecc0f185aae42a557093eae9d488fd5f26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cc06e41e5f6b3a351e6699dba767419362dabfc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/34f15c2d425a0ba3e0b25f67abb59aecc0f185aae42a557093eae9d488fd5f26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:412ba66e5c5f8b126bea2ecf73a01e103f9ae70d6afa417de2271869fe77fdfc +size 78571 diff --git a/data/2025/2504_05xxx/2504.05731/images/370a5924ecdacae48795859736686a99e261ba862019de8a358e02e9a7b90220.jpg b/data/2025/2504_05xxx/2504.05731/images/370a5924ecdacae48795859736686a99e261ba862019de8a358e02e9a7b90220.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff12f7fc7ccd5a177ec45437e0c61602afc06739 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/370a5924ecdacae48795859736686a99e261ba862019de8a358e02e9a7b90220.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7604a9b6435be28a7f6962658e1a553bc9745ea823151c6587f76df2cb5a3443 +size 5494 diff --git a/data/2025/2504_05xxx/2504.05731/images/45032d0b3f912fe033f347f966ca5cdbdf24f9320b31a6df192191757e19f649.jpg 
b/data/2025/2504_05xxx/2504.05731/images/45032d0b3f912fe033f347f966ca5cdbdf24f9320b31a6df192191757e19f649.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8f923a4a9df8a6231f110e32c68d0662dccf550 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/45032d0b3f912fe033f347f966ca5cdbdf24f9320b31a6df192191757e19f649.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02e9036ff8fded88eb193539aa0dc28431fc796d05d4d0fcd84e1f5970674c8b +size 5846 diff --git a/data/2025/2504_05xxx/2504.05731/images/4d1923b82576cd1772eb0abb7c69d9b2c56470581b24e15f43d202ce4b7dd758.jpg b/data/2025/2504_05xxx/2504.05731/images/4d1923b82576cd1772eb0abb7c69d9b2c56470581b24e15f43d202ce4b7dd758.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a6d189ecb9add7fd80f90bcf16c939d2dc07cdb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/4d1923b82576cd1772eb0abb7c69d9b2c56470581b24e15f43d202ce4b7dd758.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3b87f97aee29aeb8d9de5924b3f2ea3f60d04d331780301cdbaec9e3e11113e +size 84307 diff --git a/data/2025/2504_05xxx/2504.05731/images/4de643f3cc500517a2b74f1c2f8233ac753bd2f716de8d3e58deb45f124dd930.jpg b/data/2025/2504_05xxx/2504.05731/images/4de643f3cc500517a2b74f1c2f8233ac753bd2f716de8d3e58deb45f124dd930.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e809eba8b6def8be6e1907e7a22a8e550600718 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/4de643f3cc500517a2b74f1c2f8233ac753bd2f716de8d3e58deb45f124dd930.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f064563940284d55aa6c8916bab75e9989e46531fc15271414653411ed586a7 +size 5529 diff --git a/data/2025/2504_05xxx/2504.05731/images/553b67f5416e9bf7a6397eb8a5e9f5ff9e90f77643a2e2ab2977210184904014.jpg b/data/2025/2504_05xxx/2504.05731/images/553b67f5416e9bf7a6397eb8a5e9f5ff9e90f77643a2e2ab2977210184904014.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4abeca23e1ed56360a581f9bba8f31f9a80cb081 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/553b67f5416e9bf7a6397eb8a5e9f5ff9e90f77643a2e2ab2977210184904014.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e58dc77f628dc0b0ffd906a5e2e90d441159b51fe177804782d73bbbdd4513b9 +size 5646 diff --git a/data/2025/2504_05xxx/2504.05731/images/5bb368b797bf6324680c0dab35687b3a755828cbb10a15bd627324662e645b92.jpg b/data/2025/2504_05xxx/2504.05731/images/5bb368b797bf6324680c0dab35687b3a755828cbb10a15bd627324662e645b92.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99b31713f487385e91738a8052afbf8bfa5b7d39 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/5bb368b797bf6324680c0dab35687b3a755828cbb10a15bd627324662e645b92.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b67b13f94cfe85643f3b4aeb68b97df3b3fb159524f8d6399edd2ba3849ca839 +size 9148 diff --git a/data/2025/2504_05xxx/2504.05731/images/625fdaef24bf247580e641a34943670ea15cb8f8da28784bc68a9e6a62dbe508.jpg b/data/2025/2504_05xxx/2504.05731/images/625fdaef24bf247580e641a34943670ea15cb8f8da28784bc68a9e6a62dbe508.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f295b1883e402a9575335472ff113e1973ddef6b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/625fdaef24bf247580e641a34943670ea15cb8f8da28784bc68a9e6a62dbe508.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:36a21a75a5e8bfcb2c92ff8caf4b33eeacc3b533c8a75bb77067d50a604ca3a7 +size 21303 diff --git a/data/2025/2504_05xxx/2504.05731/images/6cea494ce69dd19e4155bbac2bdb81b8052ffdd33a3f5e4661fcc323a21cbc4f.jpg b/data/2025/2504_05xxx/2504.05731/images/6cea494ce69dd19e4155bbac2bdb81b8052ffdd33a3f5e4661fcc323a21cbc4f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82d08f5a31135144ca3670eb8f97cfbd7404a9c7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/6cea494ce69dd19e4155bbac2bdb81b8052ffdd33a3f5e4661fcc323a21cbc4f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff12ddfe82c96f02d909a0d2bf7d8385bf894da6ce1f654176270dffa3e393a7 +size 235053 diff --git a/data/2025/2504_05xxx/2504.05731/images/6edc98bed93f6b43480ec1d654b337ca409da377fc6ab6198e2c9c1f23130ce8.jpg b/data/2025/2504_05xxx/2504.05731/images/6edc98bed93f6b43480ec1d654b337ca409da377fc6ab6198e2c9c1f23130ce8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df9170c2e8c5fefc555d766a28025bd49c496752 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/6edc98bed93f6b43480ec1d654b337ca409da377fc6ab6198e2c9c1f23130ce8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f391d6482d1ea5b7f0a4e043995ed4e37e2bf033b778b9f3e45c8d4f23d334ca +size 4712 diff --git a/data/2025/2504_05xxx/2504.05731/images/7082bea53d532e5d94a2453a6bded23a4f6f523ae049809dfc0d039c880c149f.jpg b/data/2025/2504_05xxx/2504.05731/images/7082bea53d532e5d94a2453a6bded23a4f6f523ae049809dfc0d039c880c149f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d014ecfdd9e2920ea5c57dcbfe44e5361a0055c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/7082bea53d532e5d94a2453a6bded23a4f6f523ae049809dfc0d039c880c149f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b561e83a200ec6a14c599c14841a2ba132bdb49957d800e53d97811657280cff +size 123785 diff --git a/data/2025/2504_05xxx/2504.05731/images/79735191f069bd3fe577f6f68843cfd442708060861370a8d9a6cbcee6d995b3.jpg b/data/2025/2504_05xxx/2504.05731/images/79735191f069bd3fe577f6f68843cfd442708060861370a8d9a6cbcee6d995b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50c1e9fd19f25616a063372d0e5aff8408654a1e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/79735191f069bd3fe577f6f68843cfd442708060861370a8d9a6cbcee6d995b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5299a359cbaea3a498ceaaddac1c64feee5d88e7e490f629c57b257b6aebd364 +size 11428 diff --git a/data/2025/2504_05xxx/2504.05731/images/81368817db3e34bbb1db188fa57860badccf1c91f22e28f4651cf117e8756cd9.jpg b/data/2025/2504_05xxx/2504.05731/images/81368817db3e34bbb1db188fa57860badccf1c91f22e28f4651cf117e8756cd9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3756c63e602124c4510a329c5a4749a2394f2ea --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/81368817db3e34bbb1db188fa57860badccf1c91f22e28f4651cf117e8756cd9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57d31dc4afd505c86d6a68821f74ba5fed53e82b0be1cc000258a29ea450159f +size 9595 diff --git a/data/2025/2504_05xxx/2504.05731/images/83c00f18c2b907f8f9980e476359f97f132ed1a1ca8fa078b88d50e7d5d7f922.jpg b/data/2025/2504_05xxx/2504.05731/images/83c00f18c2b907f8f9980e476359f97f132ed1a1ca8fa078b88d50e7d5d7f922.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f24cb4e2de81a2b7e6dddbccd0b5753d53749686 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05731/images/83c00f18c2b907f8f9980e476359f97f132ed1a1ca8fa078b88d50e7d5d7f922.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61b5ec95253218d57033e26f6f43bdd5e93f488010064fa6350419950aeae8e3 +size 9603 diff --git a/data/2025/2504_05xxx/2504.05731/images/83cde8aa2ad4ce4f601933f54a812e13703cb17f9d80156a716ccbf53ac36f6e.jpg b/data/2025/2504_05xxx/2504.05731/images/83cde8aa2ad4ce4f601933f54a812e13703cb17f9d80156a716ccbf53ac36f6e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3686fe3bb832a3bbec7ddba6acc0106ce7f545ec --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/83cde8aa2ad4ce4f601933f54a812e13703cb17f9d80156a716ccbf53ac36f6e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6373c04d5a3a2d29649a7774d57dacc1eecb216c8adee51a2a6487607a95b15e +size 10362 diff --git a/data/2025/2504_05xxx/2504.05731/images/86faea8a327cef5e2b9ac66cf7e276f3ee4ebff4fccc9a5f5db5d20515b61ee6.jpg b/data/2025/2504_05xxx/2504.05731/images/86faea8a327cef5e2b9ac66cf7e276f3ee4ebff4fccc9a5f5db5d20515b61ee6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23aeb184f531085413c49715bb5f4186c143e7df --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/86faea8a327cef5e2b9ac66cf7e276f3ee4ebff4fccc9a5f5db5d20515b61ee6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cabda29cf34800bcc8c1370922a1aadeed515b26ec0fa1ea37c6135aebd8f73 +size 10450 diff --git a/data/2025/2504_05xxx/2504.05731/images/92815eb6c2f51b4c9ef6ca63b04dd6a8f815dc0d32224bff35a685479dc7ee07.jpg b/data/2025/2504_05xxx/2504.05731/images/92815eb6c2f51b4c9ef6ca63b04dd6a8f815dc0d32224bff35a685479dc7ee07.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf5385e47aaf97db05baa8b73d896277a98959d7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/92815eb6c2f51b4c9ef6ca63b04dd6a8f815dc0d32224bff35a685479dc7ee07.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0651bdfdc890f5c4ea9da45f37cfb78cd4af746d13ba0733a7ed41413f1a8cd9 +size 25603 diff --git a/data/2025/2504_05xxx/2504.05731/images/94633987414c00f738c3423cbe8c6e65847b434df8ed4ae5565aeb3a005e42e8.jpg b/data/2025/2504_05xxx/2504.05731/images/94633987414c00f738c3423cbe8c6e65847b434df8ed4ae5565aeb3a005e42e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c7ab35375c80b6b7acb55ac5a6d9ee9018257812 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/94633987414c00f738c3423cbe8c6e65847b434df8ed4ae5565aeb3a005e42e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96957d044ed61f92b6e4be5e8e90b85ba386b7caf9507e08d1e70ba65cbc6310 +size 10759 diff --git a/data/2025/2504_05xxx/2504.05731/images/9976451dd3eec8608f9caf953988cee5f27caa57ccb7db4f2daace3708385103.jpg b/data/2025/2504_05xxx/2504.05731/images/9976451dd3eec8608f9caf953988cee5f27caa57ccb7db4f2daace3708385103.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c83851ed968ec225caa86418536271422662554d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/9976451dd3eec8608f9caf953988cee5f27caa57ccb7db4f2daace3708385103.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:542435d6328e6f412f6a5a3d6d9289f74f464d87275b683595303afc79ef6c4a +size 15813 diff --git a/data/2025/2504_05xxx/2504.05731/images/9c481f693eb54eb4efbf0d5c0f2af5aa8f4d150f073794b47f3ad1f2fb237cf5.jpg 
b/data/2025/2504_05xxx/2504.05731/images/9c481f693eb54eb4efbf0d5c0f2af5aa8f4d150f073794b47f3ad1f2fb237cf5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20baa4da6277ea23f8ee591e025252beb737b004 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/9c481f693eb54eb4efbf0d5c0f2af5aa8f4d150f073794b47f3ad1f2fb237cf5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a8c66c86fe572718a97102b832be77e76e8133ac53e603c9ebf5ee7f9d9c873 +size 10890 diff --git a/data/2025/2504_05xxx/2504.05731/images/9ff4e06204e6c48c44c4467eca5a18d03e07c6da14c61020e7c1603ef065ab96.jpg b/data/2025/2504_05xxx/2504.05731/images/9ff4e06204e6c48c44c4467eca5a18d03e07c6da14c61020e7c1603ef065ab96.jpg new file mode 100644 index 0000000000000000000000000000000000000000..10ba3cd44ab2343fe84fdb0b6e3ea8aca63edfec --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/9ff4e06204e6c48c44c4467eca5a18d03e07c6da14c61020e7c1603ef065ab96.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca4a568a3f89a371867f4ac5be8918b4f93b8bfb915865129509e7fd51c7e986 +size 2923 diff --git a/data/2025/2504_05xxx/2504.05731/images/a6b102852714ef7badacf8034fddfa94795403b0b6339e25278197262aa0cdab.jpg b/data/2025/2504_05xxx/2504.05731/images/a6b102852714ef7badacf8034fddfa94795403b0b6339e25278197262aa0cdab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ab2dce0cab9f8db31e446ebfbd42df67458d643 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/a6b102852714ef7badacf8034fddfa94795403b0b6339e25278197262aa0cdab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb13b24718d1ded6f7fd05d12266e5d49cb502a4561f94523d7a81e9f99c1d78 +size 3524 diff --git a/data/2025/2504_05xxx/2504.05731/images/acb69f3db4e448b0e25bc42ebbf3669d4e4b97072a06599b923f91b4cb5074dc.jpg b/data/2025/2504_05xxx/2504.05731/images/acb69f3db4e448b0e25bc42ebbf3669d4e4b97072a06599b923f91b4cb5074dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ced79633e3bafec3dcd448db11008608c9a32ec --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/acb69f3db4e448b0e25bc42ebbf3669d4e4b97072a06599b923f91b4cb5074dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07acca60c3b530601f62c5295c0dd574f9ff6da271e6d1b31509deb6f29f65d4 +size 11365 diff --git a/data/2025/2504_05xxx/2504.05731/images/c145d4b73d48d432adc24495509ff5b7dc143be4525fd4b84a31137fa6d78e46.jpg b/data/2025/2504_05xxx/2504.05731/images/c145d4b73d48d432adc24495509ff5b7dc143be4525fd4b84a31137fa6d78e46.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6cf8fd1c3de5a4ca0ee975155be878709afda870 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/c145d4b73d48d432adc24495509ff5b7dc143be4525fd4b84a31137fa6d78e46.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89896b153f6f692775376a0510cf0b23c0e729d9afdf6accf5e4fa52b92089aa +size 97148 diff --git a/data/2025/2504_05xxx/2504.05731/images/e096674f0f80370de56434a59eb14a7c664e0ab43e4a24769d974004128917ef.jpg b/data/2025/2504_05xxx/2504.05731/images/e096674f0f80370de56434a59eb14a7c664e0ab43e4a24769d974004128917ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3bf4042c65ae1a82e3f0690e3a794579e4416d6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/e096674f0f80370de56434a59eb14a7c664e0ab43e4a24769d974004128917ef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d677ffbd065529fcc9187358746ed9bd8c7afc8feaf899b37dda22a8707c1c2c +size 10977 diff --git a/data/2025/2504_05xxx/2504.05731/images/ec0d097f9eaf257d62428318090b463fd41bd34756cf25a311d0f4e5debaae76.jpg b/data/2025/2504_05xxx/2504.05731/images/ec0d097f9eaf257d62428318090b463fd41bd34756cf25a311d0f4e5debaae76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc1f67c241e35f33fdddde307ab6ff470ed479c0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/ec0d097f9eaf257d62428318090b463fd41bd34756cf25a311d0f4e5debaae76.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b78df4ade1437ce8984c9881bb6ab2ce3e7e7748b471c0770c89cea4ed809918 +size 3839 diff --git a/data/2025/2504_05xxx/2504.05731/images/f519fd19f5d090c02b1805b4837891bd2fdbf2eba9b80e44d9e885596ab1fdf6.jpg b/data/2025/2504_05xxx/2504.05731/images/f519fd19f5d090c02b1805b4837891bd2fdbf2eba9b80e44d9e885596ab1fdf6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c58f48c006f045ef39dfb63b3b40b3e3f8c121ca --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/f519fd19f5d090c02b1805b4837891bd2fdbf2eba9b80e44d9e885596ab1fdf6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d742c736e0290d5e92c2725aef4de16c98cd0481977b9924b116f0a8aee878ef +size 5404 diff --git a/data/2025/2504_05xxx/2504.05731/images/fa1ad7982cf1c2b2fc0d929344eb06b13bcbc11bbbb85e0c5d98093ed5b060b1.jpg b/data/2025/2504_05xxx/2504.05731/images/fa1ad7982cf1c2b2fc0d929344eb06b13bcbc11bbbb85e0c5d98093ed5b060b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73f22037d4cbc5154693bd1b489cfebab47e245c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/images/fa1ad7982cf1c2b2fc0d929344eb06b13bcbc11bbbb85e0c5d98093ed5b060b1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18289802d5238cc87c76ac1a0aa1edf20961e2e34fb18a652b24310d5ba5d6da +size 7264 diff --git a/data/2025/2504_05xxx/2504.05731/layout.json b/data/2025/2504_05xxx/2504.05731/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1e2672f2c94cea762f2e2fb57d69767e10227749 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05731/layout.json @@ -0,0 +1,12949 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 52, + 79, + 559, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 79, + 559, + 118 + ], + "spans": [ + { + "bbox": [ + 52, + 79, + 559, + 118 + ], + "type": "text", + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 159, + 125, + 205, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 125, + 205, + 137 + ], + "spans": [ + { + "bbox": [ + 159, + 125, + 205, + 137 + ], + "type": "text", + "content": "Teng Shi" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 124, + 139, + 241, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 139, + 241, + 150 + ], + "spans": [ + { + "bbox": [ + 124, + 139, + 241, + 150 + ], + "type": "text", + "content": "Renmin University of China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 153, + 151, + 212, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 151, + 212, + 162 + ], + "spans": [ + { + "bbox": [ + 153, + 151, + 212, + 162 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 141, + 163, + 223, + 174 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 163, + 223, + 174 + ], + "spans": [ + { + "bbox": [ + 141, + 163, + 223, + 174 + ], + "type": "text", + "content": "shiteng@ruc.edu.cn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 146, + 197, + 217, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 197, + 217, + 210 + ], + "spans": [ + { + "bbox": [ + 146, + 197, + 217, + 210 + ], + "type": "text", + "content": "Xiaoxue Zang" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 156, + 211, + 208, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 211, + 208, + 224 + ], + "spans": [ + { + "bbox": [ + 156, + 211, + 208, + 224 + ], + "type": "text", + "content": "Kai Zheng" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 119, + 224, + 246, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 224, + 246, + 236 + ], + "spans": [ + { + "bbox": [ + 119, + 224, + 246, + 236 + ], + "type": "text", + "content": "Kuaishou Technology Co., Ltd." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 152, + 236, + 211, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 236, + 211, + 247 + ], + "spans": [ + { + "bbox": [ + 152, + 236, + 211, + 247 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 145, + 248, + 219, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 248, + 219, + 258 + ], + "spans": [ + { + "bbox": [ + 145, + 248, + 219, + 258 + ], + "type": "text", + "content": "xxic666@126.com" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 136, + 260, + 228, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 260, + 228, + 272 + ], + "spans": [ + { + "bbox": [ + 136, + 260, + 228, + 272 + ], + "type": "text", + "content": "zhengk92@gmail.com" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 279, + 96, + 290 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 279, + 96, + 290 + ], + "spans": [ + { + "bbox": [ + 51, + 279, + 96, + 290 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 293, + 295, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 293, + 295, + 568 + ], + "spans": [ + { + "bbox": [ + 50, + 293, + 295, + 568 + ], + "type": "text", + "content": "Recently, the personalization of Large Language Models (LLMs) to generate content that aligns with individual user preferences has garnered widespread attention. Personalized Retrieval-Augmented Generation (RAG), which retrieves relevant documents from the user's history to reflect their preferences and enhance LLM generation, is one commonly used approach for personalization. However, existing personalized RAG methods do not consider that the histories of similar users can also assist in personalized generation for the current user, meaning that collaborative information between users can also benefit personalized generation. Inspired by the application of collaborative filtering in recommender systems, we propose a method called CFRAG, which adapts Collaborative Filtering to RAG for personalized text generation. However, this presents two challenges: (1) how to incorporate collaborative information without explicit user similarity labels? (2) how to retrieve documents that support personalized LLM generation? 
For Challenge 1, we use contrastive learning to train user embeddings to retrieve similar users and introduce collaborative information. For Challenge 2, we design a personalized retriever and reranker to retrieve the top-" + }, + { + "bbox": [ + 50, + 293, + 295, + 568 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 50, + 293, + 295, + 568 + ], + "type": "text", + "content": " documents from these users' histories. We take into account the user's preference during retrieval and reranking. Then we leverage feedback from the LLM to fine-tune the personalized retriever and reranker, enabling them to retrieve documents that meet the personalized generation needs of the LLM. Experimental results on the Language Model Personalization (LaMP) benchmark" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 579, + 295, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 579, + 295, + 605 + ], + "spans": [ + { + "bbox": [ + 50, + 579, + 295, + 605 + ], + "type": "text", + "content": "*Corresponding authors. Work partially done at Engineering Research Center of Next-Generation Intelligent Search and Recommendation, Ministry of Education. \nWork done when Teng Shi was the intern at Kuaishou." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "type": "text", + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 676, + 117, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 676, + 117, + 684 + ], + "spans": [ + { + "bbox": [ + 52, + 676, + 117, + 684 + ], + "type": "text", + "content": "SIGIR '25, Padua, Italy." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 685, + 289, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 685, + 289, + 700 + ], + "spans": [ + { + "bbox": [ + 52, + 685, + 289, + 700 + ], + "type": "text", + "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM. 
ACM ISBN 979-8-4007-1592-1/25/07" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 52, + 700, + 177, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 177, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 177, + 709 + ], + "type": "text", + "content": "https://doi.org/10.1145/XXXXXX.XXXXXXX" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 408, + 125, + 447, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 408, + 125, + 447, + 137 + ], + "spans": [ + { + "bbox": [ + 408, + 125, + 447, + 137 + ], + "type": "text", + "content": "Jun Xu*" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 399, + 139, + 458, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 139, + 458, + 152 + ], + "spans": [ + { + "bbox": [ + 399, + 139, + 458, + 152 + ], + "type": "text", + "content": "Xiao Zhang" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 371, + 152, + 487, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 371, + 152, + 487, + 163 + ], + "spans": [ + { + "bbox": [ + 371, + 152, + 487, + 163 + ], + "type": "text", + "content": "Renmin University of China" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 399, + 164, + 459, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 164, + 459, + 175 + ], + "spans": [ + { + "bbox": [ + 399, + 164, + 459, + 175 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 367, + 176, + 490, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 176, + 490, + 187 + ], + "spans": [ + { + "bbox": [ + 367, + 176, + 490, + 187 + ], + "type": "text", + "content": "{junxu,zhangx89}@ruc.edu.cn" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 402, + 197, + 455, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 197, + 455, + 210 + ], + "spans": [ + { + "bbox": [ + 402, + 197, + 455, + 210 + ], + "type": "text", + "content": "Yang Song" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 410, + 212, + 446, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 212, + 446, + 222 + ], + "spans": [ + { + "bbox": [ + 410, + 212, + 446, + 222 + ], + "type": "text", + "content": "Han Li" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 365, + 224, + 492, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 224, + 492, + 236 + ], + "spans": [ + { + "bbox": [ + 365, + 224, + 492, + 236 + ], + "type": "text", + "content": "Kuaishou Technology Co., Ltd." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 399, + 236, + 458, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 236, + 458, + 247 + ], + "spans": [ + { + "bbox": [ + 399, + 236, + 458, + 247 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 399, + 248, + 458, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 248, + 458, + 259 + ], + "spans": [ + { + "bbox": [ + 399, + 248, + 458, + 259 + ], + "type": "text", + "content": "ys@sonyis.me" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 380, + 260, + 477, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 260, + 477, + 270 + ], + "spans": [ + { + "bbox": [ + 380, + 260, + 477, + 270 + ], + "type": "text", + "content": "lihan08@kuaishou.com" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 280, + 559, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 280, + 559, + 303 + ], + "spans": [ + { + "bbox": [ + 314, + 280, + 559, + 303 + ], + "type": "text", + "content": "validate the effectiveness of CFRAG. Further analysis confirms the importance of incorporating collaborative information." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 315, + 312, + 388, + 325 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 312, + 388, + 325 + ], + "spans": [ + { + "bbox": [ + 315, + 312, + 388, + 325 + ], + "type": "text", + "content": "CCS Concepts" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 314, + 327, + 567, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 327, + 567, + 350 + ], + "spans": [ + { + "bbox": [ + 314, + 327, + 567, + 350 + ], + "type": "text", + "content": "- Information systems " + }, + { + "bbox": [ + 314, + 327, + 567, + 350 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 314, + 327, + 567, + 350 + ], + "type": "text", + "content": " Personalization; - Computing methodologies " + }, + { + "bbox": [ + 314, + 327, + 567, + 350 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 314, + 327, + 567, + 350 + ], + "type": "text", + "content": " Natural language generation." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 315, + 360, + 367, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 360, + 367, + 372 + ], + "spans": [ + { + "bbox": [ + 315, + 360, + 367, + 372 + ], + "type": "text", + "content": "Keywords" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 314, + 374, + 559, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 374, + 559, + 396 + ], + "spans": [ + { + "bbox": [ + 314, + 374, + 559, + 396 + ], + "type": "text", + "content": "Large language model; Personalization; Retrieval augmented generation" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 315, + 403, + 405, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 403, + 405, + 411 + ], + "spans": [ + { + "bbox": [ + 315, + 403, + 405, + 411 + ], + "type": "text", + "content": "ACM Reference Format:" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 314, + 413, + 559, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 413, + 559, + 472 + ], + "spans": [ + { + "bbox": [ + 314, + 413, + 559, + 472 + ], + "type": "text", + "content": "Teng Shi, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Yang Song, and Han Li. 2025. Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation. In Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '25), July 13-18, 2025, Padua, Italy. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/XXXXXX.XXXXXXX" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 315, + 487, + 398, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 487, + 398, + 498 + ], + "spans": [ + { + "bbox": [ + 315, + 487, + 398, + 498 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 313, + 501, + 559, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 501, + 559, + 567 + ], + "spans": [ + { + "bbox": [ + 313, + 501, + 559, + 567 + ], + "type": "text", + "content": "Personalizing Large Language Models (LLMs) [55] to generate personalized outputs tailored to individual user preferences has emerged as a significant and rapidly growing field [16, 23, 29, 31, 32, 36, 37, 57]. Personalized Retrieval-Augmented Generation (RAG) [8] has become a commonly used approach for personalizing LLMs [29, 31, 32, 57]." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 313, + 567, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 567, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 567, + 559, + 710 + ], + "type": "text", + "content": "The process of existing personalized RAG methods typically involves retrieving similar documents from the user's historical behaviors based on the user's input query, then concatenating these documents with the query as a prompt input to the LLM for generation. Although effective, this approach is limited to retrieving only the current user's history, neglecting collaborative information. Users with similar histories tend to be more alike, and the information from these similar users can also aid in personalizing generation for the current user. As shown in the example in Figure 1, the upper part illustrates the results of the existing RAG method, which retrieves documents from the current user's history. 
We can only infer from these results that \"She\" in the user's input refers to \"Hillary Clinton\". In contrast, the lower part demonstrates" + } + ] + } + ], + "index": 39 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 221, + 37, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 221, + 37, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 221, + 37, + 555 + ], + "type": "text", + "content": "arXiv:2504.05731v1 [cs.IR] 8 Apr 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 82, + 541, + 276 + ], + "blocks": [ + { + "bbox": [ + 65, + 82, + 541, + 276 + ], + "lines": [ + { + "bbox": [ + 65, + 82, + 541, + 276 + ], + "spans": [ + { + "bbox": [ + 65, + 82, + 541, + 276 + ], + "type": "image", + "image_path": "c145d4b73d48d432adc24495509ff5b7dc143be4525fd4b84a31137fa6d78e46.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 278, + 559, + 334 + ], + "lines": [ + { + "bbox": [ + 50, + 278, + 559, + 334 + ], + "spans": [ + { + "bbox": [ + 50, + 278, + 559, + 334 + ], + "type": "text", + "content": "Figure 1: An example from the LaMP-4 dataset [32]. The task of LaMP-4 is to generate personalized news headlines based on user input. This example illustrates the benefit of collaborative information for LLM personalization: (a) The top shows results retrieved by the existing RAG method from the current user's history, where we can only infer that \"She\" in the user's input refers to \"Hillary Clinton\". (b) The bottom shows results retrieved by our method from similar users' histories, allowing us to infer further that \"his\" in the user's input refers to \"Donald Trump\" thus enabling the generation of a more accurate result." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 342, + 295, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 342, + 295, + 407 + ], + "spans": [ + { + "bbox": [ + 50, + 342, + 295, + 407 + ], + "type": "text", + "content": "our method, which retrieves documents from the history of similar users. In this case, we can further infer that \"his\" in the user's input refers to \"Donald Trump\", leading to a better generation result. From this example, we can see that incorporating collaborative information allows the retrieval of more diverse documents, helping the LLM generate results that better meet the user's needs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 407, + 295, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 407, + 295, + 561 + ], + "spans": [ + { + "bbox": [ + 50, + 407, + 295, + 561 + ], + "type": "text", + "content": "Inspired by the application of collaborative filtering in recommender systems [11, 40, 46], we propose to adapt collaborative information into RAG to personalize LLMs. However, adapting collaborative filtering to personalized RAG presents two challenges. Challenge 1: How to incorporate collaborative information. Without explicit labels indicating which users are similar, which users' information should be selected to help personalize generation for the current user? Challenge 2: How to retrieve documents that support personalized LLM generation, rather than relying on traditional semantic relevance? 
Pre-trained dense retrieval models [54] only retrieve based on the semantic relevance between the query and document. Directly using these models for retrieval may not necessarily result in content that allows the LLM to generate outputs that meet the user's needs [25, 35]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 561, + 295, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 561, + 295, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 561, + 295, + 704 + ], + "type": "text", + "content": "To address the above challenges, this paper proposes a method named CFRAG which adapts Collaborative Filtering to personalized Retrieval Augmented Generation. Firstly, to address Challenge 1, since there are no explicit user similarity labels, we use contrastive learning [15, 44] to train user embeddings for retrieving similar users to introduce collaborative information. Specifically, we apply different data augmentation methods to the user's history to obtain different views, and then treat different views of the same user's history as positive samples for each other. Then we use contrastive learning on different views to train the user embeddings. Secondly, for Challenge 2, we designed a personalized retriever and reranker to retrieve the top-" + }, + { + "bbox": [ + 50, + 561, + 295, + 704 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 50, + 561, + 295, + 704 + ], + "type": "text", + "content": " documents from the histories of the retrieved users. In both retrieval and reranking, in addition to the semantic" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 342, + 559, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 342, + 559, + 430 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 559, + 430 + ], + "type": "text", + "content": "relevance between the query and documents, we also considered the user's preferences for different documents to enable personalized retrieval. Additionally, we further fine-tune the retriever and reranker based on the feedback from the LLM to ensure that the retrieved documents better support the personalized LLM generation. Finally, the top-" + }, + { + "bbox": [ + 313, + 342, + 559, + 430 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 342, + 559, + 430 + ], + "type": "text", + "content": " documents are concatenated with the user's input query to form a prompt, which is then fed into the LLM for personalized generation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 430, + 559, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 430, + 559, + 485 + ], + "spans": [ + { + "bbox": [ + 314, + 430, + 559, + 485 + ], + "type": "text", + "content": "The major contributions of the paper are summarized as follows: We analyzed the necessity of introducing collaborative filtering into RAG for LLM personalization and identified the challenges: how to introduce collaborative information and how to retrieve documents that support personalized LLM generation." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 484, + 559, + 583 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 314, + 484, + 559, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 484, + 559, + 539 + ], + "spans": [ + { + "bbox": [ + 314, + 484, + 559, + 539 + ], + "type": "text", + "content": "- We proposed a method called CFRAG, which uses contrastive learning to train user embeddings for retrieving similar users and incorporating collaborative information. It leverages LLM feedback to train the personalized retriever and reranker, enabling them to retrieve documents that support personalized LLM generation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 539, + 559, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 539, + 559, + 583 + ], + "spans": [ + { + "bbox": [ + 314, + 539, + 559, + 583 + ], + "type": "text", + "content": "- Experimental results on the Language Model Personalization (LaMP) [32] benchmark validate the effectiveness of CFRAG. The experimental analysis also demonstrates the importance of leveraging collaborative information." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 604, + 404, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 604, + 404, + 615 + ], + "spans": [ + { + "bbox": [ + 315, + 604, + 404, + 615 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 622, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 622, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 622, + 559, + 710 + ], + "type": "text", + "content": "Personalization of LLMs. Large Language Models (LLMs) [55] have demonstrated remarkable capabilities in various fields, such as text generation [22], information retrieval [56], recommender systems [5, 41], and so on. However, since LLMs are typically designed to serve all tasks with a single model and are trained on broad, domain-agnostic data, they face challenges in adapting to the personalized needs of individual users [4, 32]. Therefore, LLM personalization has attracted widespread attention [16, 31, 57]." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 171, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 171, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 171, + 68 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "text", + "content": "Teng Shi et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 67, + 83, + 539, + 266 + ], + "blocks": [ + { + "bbox": [ + 67, + 83, + 539, + 266 + ], + "lines": [ + { + "bbox": [ + 67, + 83, + 539, + 266 + ], + "spans": [ + { + "bbox": [ + 67, + 83, + 539, + 266 + ], + "type": "image", + "image_path": "34f15c2d425a0ba3e0b25f67abb59aecc0f185aae42a557093eae9d488fd5f26.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 49, + 268, + 560, + 312 + ], + "lines": [ + { + "bbox": [ + 49, + 268, + 560, + 312 + ], + "spans": [ + { + "bbox": [ + 49, + 268, + 560, + 312 + ], + "type": "text", + "content": "Figure 2: The architecture of CFRAG. From left to right: (a) User Retrieval retrieves similar users (Section 4.1); (b) Retriever retrieves the top- " + }, + { + "bbox": [ + 49, + 268, + 560, + 312 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 49, + 268, + 560, + 312 + ], + "type": "text", + "content": " documents from each user's history (Section 4.2); (c) Reranker reranks the " + }, + { + "bbox": [ + 49, + 268, + 560, + 312 + ], + "type": "inline_equation", + "content": "m \\times k" + }, + { + "bbox": [ + 49, + 268, + 560, + 312 + ], + "type": "text", + "content": " documents to get the final top- " + }, + { + "bbox": [ + 49, + 268, + 560, + 312 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 49, + 268, + 560, + 312 + ], + "type": "text", + "content": " documents, which are then concatenated with the query and input into the LLM for personalized text generation (Section 4.3)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 320, + 296, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 320, + 296, + 474 + ], + "spans": [ + { + "bbox": [ + 50, + 320, + 296, + 474 + ], + "type": "text", + "content": "Existing works on LLM personalization mainly include the following types of methods: (1) Fine-tuning a personalized LLM for each user [36, 37, 42]; Tan et al. [37] fine-tuned the LLM using LoRA [12] to get personalized LoRA parameters for each user. (2) Aligning LLMs with user-specific preferences through Reinforcement Learning from Human Feedback (RLHF) [16, 23, 43]; Jang et al. [16] first trained different parameters for various objectives using RLHF, then merged these parameters based on users' personalized needs. (3) Incorporating user-specific context into the prompt [21, 27, 29, 31, 32, 57]. Richardson et al. [29] used instruction-tuned LLMs to summarize user history and then incorporated it into prompts for generation. Salemi et al. [31, 32] used RAG to retrieve relevant documents from user history based on the input query and incorporated them into the prompt." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 474, + 296, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 474, + 296, + 605 + ], + "spans": [ + { + "bbox": [ + 50, + 474, + 296, + 605 + ], + "type": "text", + "content": "This paper further introduces collaborative filtering for personalization based on the RAG framework. Collaborative filtering has already been applied in fields such as recommender systems [33, 34, 38, 48-52] and has been proven effective. 
It assumes that users who have interacted with similar items share similar preferences, and recommending items from similar users to the current user can meet their needs. Some works [11, 46] learn the collaborative information between users and items through matrix factorization [19], while others [10, 40] further explore higher-order collaborative information between users and items using graph neural networks. The application of collaborative filtering in LLM personalization remains under-explored." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 608, + 296, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 608, + 296, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 608, + 296, + 708 + ], + "type": "text", + "content": "Retrieval Augmented Generation. Retrieval Augmented Generation [7, 8] introduces external knowledge through document retrieval, alleviating issues such as LLM hallucinations [53], and enhancing LLMs' capabilities in knowledge-intensive tasks [17] such as open-domain question answering [14, 20]. Some works [3, 13] encode retrieved documents using separate encoders, and then fuse the results with the language model using cross-attention. A more common approach is to directly include the retrieved documents in the prompt of the LLM [2, 9, 20, 25, 35]. In recent years, this" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 320, + 561, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 320, + 561, + 365 + ], + "spans": [ + { + "bbox": [ + 314, + 320, + 561, + 365 + ], + "type": "text", + "content": "in-context RAG framework has also been applied to LLM personalization, which is personalized by retrieving documents from the user's history [31, 32, 57]. This paper introduces collaborative filtering by retrieving similar users' histories for better personalization." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 373, + 442, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 373, + 442, + 385 + ], + "spans": [ + { + "bbox": [ + 315, + 373, + 442, + 385 + ], + "type": "text", + "content": "3 Problem Formulation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "spans": [ + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "\\mathcal{U} = \\{u_1, u_2, \\ldots, u_M\\}" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": " denotes the set of all users, where " + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": " is the number of users. 
Each user " + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "u \\in \\mathcal{U}" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": " has a chronologically ordered history " + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u = [d_1, d_2, \\ldots, d_N]" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": " which includes all her historical documents, where " + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": " is the number of documents in the history. The personalized text generation dataset is " + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{(u, q, y)_i\\}_{i=1}^{|D|}" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": ". For each instance, " + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": " is the query input by the user " + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": " to the LLM, and " + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": " is the target output. Our goal is first to introduce collaborative information by retrieving the top-" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": " most similar users for user " + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 314, + 388, + 560, + 479 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 382, + 483, + 492, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 382, + 483, + 492, + 495 + ], + "spans": [ + { + "bbox": [ + 382, + 483, + 492, + 495 + ], + "type": "interline_equation", + "content": "\\mathcal{U}_{\\mathrm{retrieved}} = \\{u_1, u_2, \\dots, u_m\\}.", + "image_path": "9ff4e06204e6c48c44c4467eca5a18d03e07c6da14c61020e7c1603ef065ab96.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 500, + 559, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 500, + 559, + 521 + ], + "spans": [ + { + "bbox": [ + 314, + 500, + 559, + 521 + ], + "type": "text", + "content": "Then, we use a retriever to retrieve the top-" + }, + { + "bbox": [ + 314, + 500, + 559, + 521 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 314, + 500, + 559, + 521 + ], + "type": "text", + "content": " documents from each of the " + }, + { + "bbox": [ + 314, + 500, + 559, + 521 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 314, + 500, + 559, + 521 + ], + "type": "text", + "content": " users' histories, resulting in a total of " + }, + { + "bbox": [ + 314, + 500, + 559, + 521 + ], + "type": "inline_equation", + "content": "m \\times k" + }, + { + "bbox": [ + 314,
+ 500, + 559, + 521 + ], + "type": "text", + "content": " documents." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 348, + 527, + 525, + 539 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 527, + 525, + 539 + ], + "spans": [ + { + "bbox": [ + 348, + 527, + 525, + 539 + ], + "type": "interline_equation", + "content": "\\mathcal{D}_{\\mathrm{retrieved}} = \\{d_{i,j} | i \\in \\{1, \\dots, m\\}, j \\in \\{1, \\dots, k\\}\\}.", + "image_path": "286390de4f0085e9bc40dde8cd842fc967aed23ac33c9c8b2fef3cd5ce5de750.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 543, + 559, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 543, + 559, + 565 + ], + "spans": [ + { + "bbox": [ + 314, + 543, + 559, + 565 + ], + "type": "text", + "content": "Finally, we use a reranker to rerank these " + }, + { + "bbox": [ + 314, + 543, + 559, + 565 + ], + "type": "inline_equation", + "content": "m \\times k" + }, + { + "bbox": [ + 314, + 543, + 559, + 565 + ], + "type": "text", + "content": " documents and obtain the final top-" + }, + { + "bbox": [ + 314, + 543, + 559, + 565 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 314, + 543, + 559, + 565 + ], + "type": "text", + "content": " documents:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 378, + 570, + 495, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 570, + 495, + 582 + ], + "spans": [ + { + "bbox": [ + 378, + 570, + 495, + 582 + ], + "type": "interline_equation", + "content": "\\mathcal{D}_{\\mathrm{reranked}} = \\{d_i | i \\in \\{1, \\dots, k\\}\\}.", + "image_path": "a6b102852714ef7badacf8034fddfa94795403b0b6339e25278197262aa0cdab.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 586, + 558, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 586, + 558, + 619 + ], + "spans": [ + { + "bbox": [ + 314, + 586, + 558, + 619 + ], + "type": "text", + "content": "These top-" + }, + { + "bbox": [ + 314, + 586, + 558, + 619 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 314, + 586, + 558, + 619 + ], + "type": "text", + "content": " documents will be concatenated with the user's query " + }, + { + "bbox": [ + 314, + 586, + 558, + 619 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 314, + 586, + 558, + 619 + ], + "type": "text", + "content": " as a prompt and input into the LLM, enabling it to generate a response that aligns with the target output " + }, + { + "bbox": [ + 314, + 586, + 558, + 619 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 314, + 586, + 558, + 619 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 619, + 559, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 619, + 559, + 664 + ], + "spans": [ + { + "bbox": [ + 314, + 619, + 559, + 664 + ], + "type": "text", + "content": "This paper primarily focuses on how to retrieve " + }, + { + "bbox": [ + 314, + 619, + 559, + 664 + ], + "type": "inline_equation", + "content": "\\mathcal{U}_{\\mathrm{retrieved}}" + }, + { + "bbox": [ + 314, + 619, + 559, + 664 + ], + "type": "text", + "content": " to introduce collaborative information, and how to train the retriever and reranker so that they can effectively retrieve documents that support the personalized LLM generation."
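The formulation above fixes the end-to-end flow: retrieve the top-m similar users, pull the top-k documents from each of their histories, rerank the resulting m x k candidates down to the final k, and prompt the LLM with those documents plus the query. A minimal sketch of that flow follows; retrieve_users, doc_retriever, reranker, and llm are hypothetical stand-ins for the components defined in Sections 4.1-4.3, not the paper's released implementation.

    # Minimal sketch of the Section 3 pipeline. Every callable below is a
    # hypothetical stand-in (an assumption), not the authors' released API.
    from typing import Callable, Dict, List

    def cfrag_generate(u, q: str,
                       histories: Dict[object, List[str]],
                       retrieve_users: Callable,  # (u, m) -> top-m similar users (Sec. 4.1)
                       doc_retriever: Callable,   # (q, u, history, k) -> top-k docs (Sec. 4.2)
                       reranker: Callable,        # (q, u, candidates, k) -> top-k docs (Sec. 4.3)
                       llm: Callable,             # prompt -> generated text
                       m: int = 3, k: int = 4) -> str:
        users = retrieve_users(u, m)                     # U_retrieved, |users| = m
        candidates: List[str] = []                       # D_retrieved, m * k documents
        for ui in users:
            candidates += doc_retriever(q, u, histories[ui], k)
        top_docs = reranker(q, u, candidates, k)         # D_reranked, final top-k
        prompt = "\n".join(top_docs) + "\n" + q          # docs concatenated with the query
        return llm(prompt)

Note that both retrieval calls condition on the current user u as well as the query q: that is the hook through which CFRAG makes retrieval and reranking personalized rather than purely semantic.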
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 673, + 406, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 673, + 406, + 685 + ], + "spans": [ + { + "bbox": [ + 315, + 673, + 406, + 685 + ], + "type": "text", + "content": "4 Our Approach" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 687, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 687, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 687, + 560, + 710 + ], + "type": "text", + "content": "This section introduces our method CFRAG. CFRAG's overall architecture is shown in Figure 2. As mentioned in Section 1, to address" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "type": "text", + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 440, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 440, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 440, + 60, + 559, + 69 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "text", + "content": "Challenge 1, i.e., how to introduce collaborative information, we first train user embeddings using contrastive learning to retrieve the top-" + }, + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "text", + "content": " most similar users (see Section 4.1). For Challenge 2, which involves retrieving documents that support personalized LLM generation, we fine-tune the personalized retriever and reranker using LLM feedback. The retriever first retrieves the top-" + }, + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "text", + "content": " documents from the history of each of the " + }, + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "text", + "content": " users, resulting in " + }, + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "inline_equation", + "content": "m \\times k" + }, + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "text", + "content": " documents (see Section 4.2). The reranker then reranks these documents to obtain the final top-" + }, + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 50, + 84, + 296, + 194 + ], + "type": "text", + "content": " documents as input for the LLM (see Section 4.3)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 205, + 149, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 205, + 149, + 216 + ], + "spans": [ + { + "bbox": [ + 51, + 205, + 149, + 216 + ], + "type": "text", + "content": "4.1 User Retrieval" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 220, + 296, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 220, + 296, + 331 + ], + "spans": [ + { + "bbox": [ + 50, + 220, + 296, + 331 + ], + "type": "text", + "content": "First, we perform user retrieval to get the top-" + }, + { + "bbox": [ + 50, + 220, + 296, + 331 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 50, + 220, + 296, + 331 + ], + "type": "text", + "content": " most similar users for user " + }, + { + "bbox": [ + 50, + 220, + 296, + 331 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 50, + 220, + 296, + 331 + ], + "type": "text", + "content": " to introduce collaborative information. However, we do not have labels indicating which users are similar to each other. To address this, we employ a contrastive learning [15, 44] approach. We apply different data augmentation methods to the user history " + }, + { + "bbox": [ + 50, + 220, + 296, + 331 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u" + }, + { + "bbox": [ + 50, + 220, + 296, + 331 + ], + "type": "text", + "content": " to obtain different views of the user's history. We treat different views of the same user as positive samples and the histories of other users as negative samples, and then we use the InfoNCE [28] loss to train user embeddings for retrieval. Figure 3 illustrates the process of training user embeddings using contrastive learning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "spans": [ + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "content": "4.1.1 User Encoder. Specifically, we first use an embedding model (such as BERT [6], RoBERTa [26], BGE [45] etc.) " + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "inline_equation", + "content": "\\mathbf{Emb}(\\cdot)" + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "content": " to encode each document in the user's history " + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u" + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "content": " to obtain " + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_u = [\\mathbf{e}_1,\\mathbf{e}_2,\\dots ,\\mathbf{e}_N]^{\\intercal}\\in \\mathbb{R}^{N\\times d}" + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_i = \\mathbf{Emb}(d_i)" + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "content": " is the embedding dimension. 
To model the sequential relationships between different documents in the user's history, we introduce positional embedding " + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "inline_equation", + "content": "\\mathbf{P}\\in \\mathbb{R}^{N\\times d}" + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "content": ". Afterward, the history " + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u" + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "content": "'s embedding becomes " + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{E}}_u = \\mathbf{E}_u + \\mathbf{P}" + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "content": ". Then, we apply a transformer [39] as the user encoder to encode the user's history " + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{E}}_u" + }, + { + "bbox": [ + 50, + 336, + 296, + 449 + ], + "type": "text", + "content": " and average the transformer's output to obtain the user's embedding:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 89, + 455, + 294, + 469 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 455, + 294, + 469 + ], + "spans": [ + { + "bbox": [ + 89, + 455, + 294, + 469 + ], + "type": "interline_equation", + "content": "\\mathbf{e}_u = \\operatorname{Encoder}_u(u) = \\operatorname{MEAN}(\\operatorname{Trm}(\\widehat{\\mathbf{E}}_u)) \\in \\mathbb{R}^d, \\tag{1}", + "image_path": "1770e817342ffc39a1149bbb6148e83683024c61a30a0f540c0cbacac3546bc7.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 474, + 294, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 474, + 294, + 508 + ], + "spans": [ + { + "bbox": [ + 50, + 474, + 294, + 508 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 474, + 294, + 508 + ], + "type": "inline_equation", + "content": "\\mathrm{Encoder}_u(\\cdot)\\to \\mathbb{R}^d" + }, + { + "bbox": [ + 50, + 474, + 294, + 508 + ], + "type": "text", + "content": " denotes the user encoder, " + }, + { + "bbox": [ + 50, + 474, + 294, + 508 + ], + "type": "inline_equation", + "content": "\\mathrm{Trm}(\\cdot)" + }, + { + "bbox": [ + 50, + 474, + 294, + 508 + ], + "type": "text", + "content": " denotes a transformer encoder. Next, we train the transformer encoder using contrastive learning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 514, + 294, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 514, + 294, + 536 + ], + "spans": [ + { + "bbox": [ + 51, + 514, + 294, + 536 + ], + "type": "text", + "content": "4.1.2 Data Augmentation. We generate different views of " + }, + { + "bbox": [ + 51, + 514, + 294, + 536 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u" + }, + { + "bbox": [ + 51, + 514, + 294, + 536 + ], + "type": "text", + "content": " using the following three data augmentation methods:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 536, + 296, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 536, + 296, + 570 + ], + "spans": [ + { + "bbox": [ + 51, + 536, + 296, + 570 + ], + "type": "text", + "content": "Document Crop. 
We randomly select a continuous sub-sequence of length " + }, + { + "bbox": [ + 51, + 536, + 296, + 570 + ], + "type": "inline_equation", + "content": "L_{c} = \\lfloor \\eta_{c}N\\rfloor" + }, + { + "bbox": [ + 51, + 536, + 296, + 570 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 51, + 536, + 296, + 570 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u" + }, + { + "bbox": [ + 51, + 536, + 296, + 570 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 51, + 536, + 296, + 570 + ], + "type": "inline_equation", + "content": "\\eta_c" + }, + { + "bbox": [ + 51, + 536, + 296, + 570 + ], + "type": "text", + "content": " is a hyper-parameter controlling the crop ratio. The history after cropping is as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 113, + 574, + 232, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 574, + 232, + 590 + ], + "spans": [ + { + "bbox": [ + 113, + 574, + 232, + 590 + ], + "type": "interline_equation", + "content": "\\mathcal{H}_u^{\\mathrm{crop}} = [d_c, d_{c+1}, \\dots, d_{c+L_c-1}].", + "image_path": "321ed29b3fe1bd2bf8b65455f1b9c4e37c82123640abf0223a5bca357a1a3ec4.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "spans": [ + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "text", + "content": "Document Mask. For the history " + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u" + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "text", + "content": ", we randomly mask out " + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "inline_equation", + "content": "L_{m} = \\lfloor \\eta_{m}N\\rfloor" + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "text", + "content": " documents " + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{mask}} = \\{i_1, i_2, \\dots, i_{L_m}\\}" + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{mask}}" + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "text", + "content": " is the set of indices corresponding to the masked documents and " + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "inline_equation", + "content": "\\eta_{m}" + }, + { + "bbox": [ + 50, + 594, + 296, + 661 + ], + "type": "text", + "content": " is a hyper-parameter that controls the mask ratio. The masked documents are replaced with a special token [mask]. The history after masking is as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 112, + 666, + 230, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 666, + 230, + 712 + ], + "spans": [ + { + "bbox": [ + 112, + 666, + 230, + 712 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal{H}_u^{\\mathrm{mask}} = [\\hat{d}_1, \\hat{d}_2, \\dots, \\hat{d}_N], \\\\ \\hat{d}_i = \\begin{cases} d_i, & i \\notin \\mathcal{I}_{\\mathrm{mask}}, \\\\ {[\\mathrm{mask}]}, & i \\in \\mathcal{I}_{\\mathrm{mask}}. \\end{cases} \\end{array}", + "image_path": "fa1ad7982cf1c2b2fc0d929344eb06b13bcbc11bbbb85e0c5d98093ed5b060b1.jpg" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 320, + 83, + 553, + 194 + ], + "blocks": [ + { + "bbox": [ + 320, + 83, + 553, + 194 + ], + "lines": [ + { + "bbox": [ + 320, + 83, + 553, + 194 + ], + "spans": [ + { + "bbox": [ + 320, + 83, + 553, + 194 + ], + "type": "image", + "image_path": "625fdaef24bf247580e641a34943670ea15cb8f8da28784bc68a9e6a62dbe508.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 197, + 558, + 209 + ], + "lines": [ + { + "bbox": [ + 315, + 197, + 558, + 209 + ], + "spans": [ + { + "bbox": [ + 315, + 197, + 558, + 209 + ], + "type": "text", + "content": "Figure 3: Contrastive learning for user embedding training." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "spans": [ + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "text", + "content": "Document Reorder. We randomly select a sub-sequence " + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "inline_equation", + "content": "[d_r, d_{r+1}, \\ldots, d_{r+L_r-1}]" + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "inline_equation", + "content": "L_r = \\lfloor \\eta_r N \\rfloor" + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u" + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "inline_equation", + "content": "\\eta_r" + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "text", + "content": " is a hyper-parameter controlling the reorder ratio, and then randomly shuffle the order of the documents within the sub-sequence to obtain " + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "inline_equation", + "content": "[\\hat{d}_r, \\hat{d}_{r+1}, \\ldots, \\hat{d}_{r+L_r-1}]" + }, + { + "bbox": [ + 314, + 220, + 559, + 285 + ], + "type": "text", + "content": ". The history after reordering is as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 349, + 289, + 523, + 304 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 349, + 289, + 523, + 304 + ], + "spans": [ + { + "bbox": [ + 349, + 289, + 523, + 304 + ], + "type": "interline_equation", + "content": "\\mathcal{H}_u^{\\mathrm{reorder}} = [d_1, d_2, \\dots, \\hat{d}_r, \\dots, \\hat{d}_{r+L_r-1}, \\dots, d_N].", + "image_path": "6edc98bed93f6b43480ec1d654b337ca409da377fc6ab6198e2c9c1f23130ce8.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "spans": [ + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": "4.1.3 Contrastive Loss. 
Each time, we randomly select two data augmentation methods " + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{A}'" + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{A}''" + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": " to generate two different views of " + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u" + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u'" + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u''" + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": ". Then, using the encoder described in Section 4.1.1, we obtain the user embeddings " + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_u'" + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_u''" + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": " corresponding to the different views. Since " + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_u'" + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_u''" + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": " are obtained through data augmentation of " + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_u" + }, + { + "bbox": [ + 314, + 308, + 559, + 418 + ], + "type": "text", + "content": ", they are more similar to each other. Therefore, we treat them as positive samples for each other and use the views generated from the augmented histories of other users in the same batch as negative samples. We then perform contrastive learning using the InfoNCE [28] loss as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 345, + 422, + 558, + 482 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 422, + 558, + 482 + ], + "spans": [ + { + "bbox": [ + 345, + 422, + 558, + 482 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {C L}} = - \\left[ \\log \\frac {\\exp \\left(\\cos \\left(\\mathbf {e} _ {u} ^ {\\prime} , \\mathbf {e} _ {u} ^ {\\prime \\prime}\\right) / \\tau_ {1}\\right)}{\\sum_ {u ^ {-} \\in \\mathcal {U} _ {\\mathrm {n e g}}} \\exp \\left(\\cos \\left(\\mathbf {e} _ {u} ^ {\\prime} , \\mathbf {e} _ {u ^ {-}} ^ {\\prime \\prime}\\right) / \\tau_ {1}\\right)} \\right. \\tag {2} \\\\ \\left. 
+ \\log \\frac {\\exp \\left(\\cos \\left(\\mathbf {e} _ {u} ^ {\\prime} , \\mathbf {e} _ {u} ^ {\\prime \\prime}\\right) / \\tau_ {1}\\right)}{\\sum_ {u ^ {-} \\in \\mathcal {U} _ {\\text {n e g}}} \\exp \\left(\\cos \\left(\\mathbf {e} _ {u ^ {-}} ^ {\\prime}, \\mathbf {e} _ {u} ^ {\\prime \\prime}\\right) / \\tau_ {1}\\right)} \\right], \\\\ \\end{array}", + "image_path": "9976451dd3eec8608f9caf953988cee5f27caa57ccb7db4f2daace3708385103.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 486, + 560, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 486, + 560, + 519 + ], + "spans": [ + { + "bbox": [ + 314, + 486, + 560, + 519 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 486, + 560, + 519 + ], + "type": "inline_equation", + "content": "\\tau_{1}" + }, + { + "bbox": [ + 314, + 486, + 560, + 519 + ], + "type": "text", + "content": " is the temperature coefficient, " + }, + { + "bbox": [ + 314, + 486, + 560, + 519 + ], + "type": "inline_equation", + "content": "\\mathcal{U}_{\\mathrm{neg}}" + }, + { + "bbox": [ + 314, + 486, + 560, + 519 + ], + "type": "text", + "content": " are the set of randomly sampled in-batch negative samples, and " + }, + { + "bbox": [ + 314, + 486, + 560, + 519 + ], + "type": "inline_equation", + "content": "\\cos (\\cdot)" + }, + { + "bbox": [ + 314, + 486, + 560, + 519 + ], + "type": "text", + "content": " denotes the cosine similarity." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "spans": [ + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "text", + "content": "4.1.4 Top-" + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "text", + "content": " User Retrieval. After training with contrastive learning, we can use the encoder from Section 4.1.1 to obtain the user embedding " + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_u" + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "text", + "content": ". We then calculate the cosine similarity between each pair of user embeddings and retrieve the top-" + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "text", + "content": " most similar users " + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "inline_equation", + "content": "\\mathcal{U}_{\\mathrm{retrieved}} = \\{u_1, u_2, \\dots, u_m\\}" + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "text", + "content": " for user " + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "text", + "content": ". Subsequently, the histories of these " + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 314, + 525, + 560, + 590 + ], + "type": "text", + "content": " users will be used for further document retrieval." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 601, + 443, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 601, + 443, + 612 + ], + "spans": [ + { + "bbox": [ + 315, + 601, + 443, + 612 + ], + "type": "text", + "content": "4.2 Document Retrieval" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "spans": [ + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "type": "text", + "content": "After retrieving the top-" + }, + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "type": "text", + "content": " users, we design a personalized retriever to retrieve the top-" + }, + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "type": "text", + "content": " documents from each user's history, resulting in a total of " + }, + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "type": "inline_equation", + "content": "m \\times k" + }, + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "type": "text", + "content": " candidate documents " + }, + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\text{retrieved}} = \\{d_{i,j} | i \\in \\{1, \\ldots, m\\}, j \\in \\{1, \\ldots, k\\}\\}" + }, + { + "bbox": [ + 314, + 615, + 560, + 682 + ], + "type": "text", + "content": ". This section introduces how the retriever is designed and how it's trained to retrieve documents that better align with the requirements of personalized LLM generation." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "content": "4.2.1 Retriever. First, we use a pre-trained dense retrieval model (such as BGE retriever [45]) to compute the semantic relevance" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 171, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 171, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 171, + 68 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "type": "text", + "content": "Teng Shi et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 83, + 290, + 183 + ], + "blocks": [ + { + "bbox": [ + 55, + 83, + 290, + 183 + ], + "lines": [ + { + "bbox": [ + 55, + 83, + 290, + 183 + ], + "spans": [ + { + "bbox": [ + 55, + 83, + 290, + 183 + ], + "type": "image", + "image_path": "92815eb6c2f51b4c9ef6ca63b04dd6a8f815dc0d32224bff35a685479dc7ee07.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 185, + 295, + 208 + ], + "lines": [ + { + "bbox": [ + 50, + 185, + 295, + 208 + ], + "spans": [ + { + "bbox": [ + 50, + 185, + 295, + 208 + ], + "type": "text", + "content": "Figure 4: The method of training the retriever and reranker using LLM feedback." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 222, + 235, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 222, + 235, + 233 + ], + "spans": [ + { + "bbox": [ + 50, + 222, + 235, + 233 + ], + "type": "text", + "content": "between the query and the candidate documents:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 91, + 239, + 295, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 239, + 295, + 255 + ], + "spans": [ + { + "bbox": [ + 91, + 239, + 295, + 255 + ], + "type": "interline_equation", + "content": "S _ {q, d} ^ {\\text {r e t r i e v e r}} = \\cos \\left(\\operatorname {E n c o d e r} _ {q} (q), \\operatorname {E n c o d e r} _ {d} (d)\\right), \\tag {3}", + "image_path": "45032d0b3f912fe033f347f966ca5cdbdf24f9320b31a6df192191757e19f649.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "spans": [ + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "type": "inline_equation", + "content": "\\mathrm{Encoder}_q(\\cdot)\\to \\mathbb{R}^d" + }, + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "type": "inline_equation", + "content": "\\mathrm{Encoder}_d(\\cdot)\\rightarrow \\mathbb{R}^d" + }, + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "type": "text", + "content": " are the encoders for the query and the document in the retrieval model, respectively. Pre-trained retrieval models typically use " + }, + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "type": "inline_equation", + "content": "S_{q,d}^{\\mathrm{retriever}}" + }, + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "type": "text", + "content": " directly for retrieval. However, " + }, + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "type": "inline_equation", + "content": "S_{q,d}^{\\mathrm{retriever}}" + }, + { + "bbox": [ + 50, + 262, + 296, + 366 + ], + "type": "text", + "content": " only considers the semantic relevance between the query and the document. 
Since different users might input the same query but expect different outputs due to their varying preferences, we further account for user personalization by calculating the preference score of the user for the document as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 96, + 373, + 295, + 388 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 373, + 295, + 388 + ], + "spans": [ + { + "bbox": [ + 96, + 373, + 295, + 388 + ], + "type": "interline_equation", + "content": "S _ {u, d} ^ {\\text {r e t r i e v e r}} = \\cos \\left(\\mathrm {M L P} _ {1} \\left(\\mathbf {e} _ {u}\\right), \\operatorname {E n c o d e r} _ {d} (d)\\right), \\tag {4}", + "image_path": "f519fd19f5d090c02b1805b4837891bd2fdbf2eba9b80e44d9e885596ab1fdf6.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 394, + 295, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 394, + 295, + 440 + ], + "spans": [ + { + "bbox": [ + 50, + 394, + 295, + 440 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 394, + 295, + 440 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_1: \\mathbb{R}^d \\to \\mathbb{R}^d" + }, + { + "bbox": [ + 50, + 394, + 295, + 440 + ], + "type": "text", + "content": " is a multi-layer perceptron that maps the user embedding to the space where the cosine similarity is computed. " + }, + { + "bbox": [ + 50, + 394, + 295, + 440 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_u" + }, + { + "bbox": [ + 50, + 394, + 295, + 440 + ], + "type": "text", + "content": " is the embedding obtained in Section 4.1.1. The total score for retrieval is computed as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 98, + 445, + 295, + 461 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 445, + 295, + 461 + ], + "spans": [ + { + "bbox": [ + 98, + 445, + 295, + 461 + ], + "type": "interline_equation", + "content": "S _ {u, q, d} ^ {\\text {r e t r i e v e r}} = (1 - \\alpha) S _ {q, d} ^ {\\text {r e t r i e v e r}} + \\alpha S _ {u, d} ^ {\\text {r e t r i e v e r}}, \\tag {5}", + "image_path": "553b67f5416e9bf7a6397eb8a5e9f5ff9e90f77643a2e2ab2977210184904014.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 466, + 296, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 466, + 296, + 489 + ], + "spans": [ + { + "bbox": [ + 50, + 466, + 296, + 489 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 466, + 296, + 489 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 50, + 466, + 296, + 489 + ], + "type": "text", + "content": " is a hyper-parameter that controls the weight of personalization." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 495, + 295, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 495, + 295, + 582 + ], + "spans": [ + { + "bbox": [ + 50, + 495, + 295, + 582 + ], + "type": "text", + "content": "4.2.2 Training. Since the pre-trained dense retrieval model is not fine-tuned for our specific task, the retrieved results may not necessarily lead to LLM responses that better match the target output " + }, + { + "bbox": [ + 50, + 495, + 295, + 582 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 50, + 495, + 295, + 582 + ], + "type": "text", + "content": " [25, 35]. However, there is no ground truth indicating which documents are better. 
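As a reference point for the feedback training described next, the scoring of Eqs. (3)-(5) can be read as one small module. This is a hedged sketch rather than the released code: the depth of MLP_1 and the default `alpha` are assumptions, and `q_emb` / `d_embs` stand for frozen bi-encoder outputs such as bge-base-en-v1.5 encodings.

```python
import torch
import torch.nn.functional as F
from torch import nn

class PersonalizedRetrieverScore(nn.Module):
    """Combines semantic relevance (Eq. 3) with user preference (Eq. 4) via Eq. (5)."""
    def __init__(self, d: int = 768):
        super().__init__()
        # MLP_1 maps e_u into the document-embedding space; exact architecture is assumed.
        self.mlp1 = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))

    def forward(self,
                q_emb: torch.Tensor,    # (d,)   Encoder_q(q)
                d_embs: torch.Tensor,   # (M, d) Encoder_d(d) for M candidate documents
                e_u: torch.Tensor,      # (d,)   user embedding from Section 4.1
                alpha: float = 0.5) -> torch.Tensor:
        s_qd = F.cosine_similarity(q_emb.unsqueeze(0), d_embs, dim=-1)           # Eq. (3)
        s_ud = F.cosine_similarity(self.mlp1(e_u).unsqueeze(0), d_embs, dim=-1)  # Eq. (4)
        return (1 - alpha) * s_qd + alpha * s_ud                                 # Eq. (5)
```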
Therefore, we evaluate the difference between the LLM's output and the target output " + }, + { + "bbox": [ + 50, + 495, + 295, + 582 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 50, + 495, + 295, + 582 + ], + "type": "text", + "content": ", using this as a label to train the retrieval model. Figure 4 shows the process of training the retriever using LLM feedback." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "spans": [ + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "text", + "content": "Specifically, we first use the pre-trained retrieval model to retrieve the top-" + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "text", + "content": " documents from each of the " + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "text", + "content": " users' histories based on " + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "inline_equation", + "content": "S_{q,d}^{\\mathrm{retriever}}" + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "text", + "content": " in Eq. (3), resulting in a total of " + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "inline_equation", + "content": "m \\times k" + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "text", + "content": " candidate documents. These documents are then concatenated with the query one by one and used as prompts for the LLM, producing " + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "inline_equation", + "content": "m \\times k" + }, + { + "bbox": [ + 50, + 583, + 295, + 651 + ], + "type": "text", + "content": " outputs:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 75, + 658, + 271, + 672 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 658, + 271, + 672 + ], + "spans": [ + { + "bbox": [ + 75, + 658, + 271, + 672 + ], + "type": "interline_equation", + "content": "\\{O _ {q, d _ {i, j}} = \\mathrm {L L M} (q, d _ {i, j}) | i \\in \\{1, \\dots , m \\}, j \\in \\{1, \\dots , k \\} \\},", + "image_path": "370a5924ecdacae48795859736686a99e261ba862019de8a358e02e9a7b90220.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 677, + 296, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 677, + 296, + 712 + ], + "spans": [ + { + "bbox": [ + 50, + 677, + 296, + 712 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 677, + 296, + 712 + ], + "type": "inline_equation", + "content": "\\mathrm{LLM}(q, d_{i,j})" + }, + { + "bbox": [ + 50, + 677, + 296, + 712 + ], + "type": "text", + "content": " represents the output generated by inputting the concatenated query " + }, + { + "bbox": [ + 50, + 677, + 296, + 712 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 50, + 677, + 296, + 712 + ], + "type": "text", + "content": " and document " + }, + { + "bbox": [ + 50, + 677, + 296, + 712 + ], + "type": "inline_equation", + "content": "d_{i,j}" + }, + { + "bbox": [ + 50, + 677, + 296, + 712 + ], + "type": "text", + "content": " into the LLM. 
Then, based on the quality of these outputs, we can calculate the distribution of" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 84, + 459, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 84, + 459, + 95 + ], + "spans": [ + { + "bbox": [ + 315, + 84, + 459, + 95 + ], + "type": "text", + "content": "these candidate documents as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 345, + 99, + 558, + 129 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 99, + 558, + 129 + ], + "spans": [ + { + "bbox": [ + 345, + 99, + 558, + 129 + ], + "type": "interline_equation", + "content": "p _ {\\text {L L M}} \\left(d _ {i, j} \\mid q, y\\right) = \\frac {\\exp (\\operatorname {e v a l} \\left(y , O _ {q , d _ {i , j}}\\right))}{\\sum_ {i = 1} ^ {m} \\sum_ {j = 1} ^ {k} \\exp (\\operatorname {e v a l} \\left(y , O _ {q , d _ {i , j}}\\right))}, \\tag {6}", + "image_path": "81368817db3e34bbb1db188fa57860badccf1c91f22e28f4651cf117e8756cd9.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "spans": [ + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "type": "inline_equation", + "content": "\\text{eval}(\\cdot)" + }, + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "type": "text", + "content": " measures the difference between the target output " + }, + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "type": "text", + "content": " and the LLM's output, using metrics such as ROUGE [24] score. A larger value returned by " + }, + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "type": "inline_equation", + "content": "\\text{eval}(\\cdot)" + }, + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "type": "text", + "content": " indicates a better-generated result. Similarly, we can also calculate the score distribution of the candidate documents by the retrieval model based on " + }, + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "type": "inline_equation", + "content": "S_{u,q,d}^{\\text{retriever}}" + }, + { + "bbox": [ + 314, + 133, + 559, + 201 + ], + "type": "text", + "content": " in Eq. (5):" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 349, + 205, + 558, + 241 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 349, + 205, + 558, + 241 + ], + "spans": [ + { + "bbox": [ + 349, + 205, + 558, + 241 + ], + "type": "interline_equation", + "content": "p _ {\\text {r e t r i e v e r}} \\left(d _ {i, j} \\mid q, u\\right) = \\frac {\\exp \\left(S _ {u , q , d _ {i , j}} ^ {\\text {r e t r i e v e r}}\\right)}{\\sum_ {i = 1} ^ {m} \\sum_ {j = 1} ^ {k} \\exp \\left(S _ {u , q , d _ {i , j}} ^ {\\text {r e t r i e v e r}}\\right)}. 
\\tag {7}", + "image_path": "5bb368b797bf6324680c0dab35687b3a755828cbb10a15bd627324662e645b92.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 244, + 559, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 244, + 559, + 300 + ], + "spans": [ + { + "bbox": [ + 314, + 244, + 559, + 300 + ], + "type": "text", + "content": "We aim for the retrieval model to retrieve documents that lead to better LLM-generated results, which means making the distribution " + }, + { + "bbox": [ + 314, + 244, + 559, + 300 + ], + "type": "inline_equation", + "content": "p_{\\mathrm{retriever}}(d|q,u)" + }, + { + "bbox": [ + 314, + 244, + 559, + 300 + ], + "type": "text", + "content": " in Eq. (7) closer to the distribution " + }, + { + "bbox": [ + 314, + 244, + 559, + 300 + ], + "type": "inline_equation", + "content": "p_{\\mathrm{LLM}}(d|q,y)" + }, + { + "bbox": [ + 314, + 244, + 559, + 300 + ], + "type": "text", + "content": " in Eq (6). Therefore, we compute the KL divergence between the two distributions as the loss to optimize the retriever:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 347, + 305, + 558, + 318 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 305, + 558, + 318 + ], + "spans": [ + { + "bbox": [ + 347, + 305, + 558, + 318 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {r e t r i e v e r}} = \\mathrm {K L} \\left(p _ {\\text {r e t r i e v e r}} (d | q, u) \\mid p _ {\\text {L L M}} (d | q, y)\\right). \\tag {8}", + "image_path": "4de643f3cc500517a2b74f1c2f8233ac753bd2f716de8d3e58deb45f124dd930.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 327, + 434, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 327, + 434, + 338 + ], + "spans": [ + { + "bbox": [ + 315, + 327, + 434, + 338 + ], + "type": "text", + "content": "4.3 Document Rerank" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "spans": [ + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "type": "text", + "content": "After retrieving " + }, + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{retrieved}}" + }, + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "type": "text", + "content": " through the retriever, in this section, we further refine the results by reranking " + }, + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{retrieved}}" + }, + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "type": "text", + "content": " to obtain the final top-" + }, + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "type": "text", + "content": " ranked results " + }, + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{reranked}} = \\{d_i | i \\in \\{1, \\dots, k\\}\\}" + }, + { + "bbox": [ + 314, + 342, + 559, + 376 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 381, + 558, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 381, + 558, + 424 + ], + "spans": [ + { + "bbox": [ + 314, + 381, + 558, + 424 + ], + "type": "text", + "content": "4.3.1 Reranker. 
We use a pre-trained cross-encoder (such as the BGE reranker [45]) to encode the query and document, obtaining the hidden state corresponding to the [CLS] token from the last layer:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 386, + 427, + 558, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 427, + 558, + 441 + ], + "spans": [ + { + "bbox": [ + 386, + 427, + 558, + 441 + ], + "type": "interline_equation", + "content": "\\mathbf {h} _ {q, d} = \\operatorname {C r o s s E n c o d e r} (q, d), \\tag {9}", + "image_path": "ec0d097f9eaf257d62428318090b463fd41bd34756cf25a311d0f4e5debaae76.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 444, + 559, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 444, + 559, + 544 + ], + "spans": [ + { + "bbox": [ + 313, + 444, + 559, + 544 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 444, + 559, + 544 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_{q,d} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 313, + 444, + 559, + 544 + ], + "type": "text", + "content": ". Similarly, when reranking, in addition to considering the semantic relevance between query and document, we also take into account the user's personalized preferences. However, since the cross-encoder does not encode documents separately, it cannot compute the cosine similarity between users and documents as shown in Eq. (4) to express the user preference score. Therefore, we directly concatenate the user embeddings to the output of the cross-encoder to account for the influence of user preferences. The overall score used for reranking is calculated as follows:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 350, + 548, + 558, + 564 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 548, + 558, + 564 + ], + "spans": [ + { + "bbox": [ + 350, + 548, + 558, + 564 + ], + "type": "interline_equation", + "content": "S _ {u, q, d} ^ {\\text {r e r a n k e r}} = \\mathrm {M L P} _ {3} \\left(\\operatorname {C O N C A T} \\left(\\mathbf {h} _ {q, d}, \\operatorname {M L P} _ {2} (\\mathbf {e} _ {u})\\right)\\right), \\tag {10}", + "image_path": "2b38bacf7f2f45a0608c219d86ab323366512e80026a8f5314c0af3b68f98d9f.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 313, + 570, + 558, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 558, + 594 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 558, + 594 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 570, + 558, + 594 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_2: \\mathbb{R}^d \\to \\mathbb{R}^d" + }, + { + "bbox": [ + 313, + 570, + 558, + 594 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 570, + 558, + 594 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_3: \\mathbb{R}^{2d} \\to \\mathbb{R}" + }, + { + "bbox": [ + 313, + 570, + 558, + 594 + ], + "type": "text", + "content": " are two multi-layer perceptions. " + }, + { + "bbox": [ + 313, + 570, + 558, + 594 + ], + "type": "inline_equation", + "content": "\\mathrm{CONCAT}(\\cdot)" + }, + { + "bbox": [ + 313, + 570, + 558, + 594 + ], + "type": "text", + "content": " denotes the concatenation operation." 
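Read as code, Eqs. (9)-(10) fuse the cross-encoder's [CLS] state with the projected user embedding. A minimal sketch, assuming `cross_encoder` wraps a model such as bge-reranker-base (tokenization included) and returns the final-layer [CLS] hidden state as a d-dimensional vector:

```python
import torch
from torch import nn

class PersonalizedReranker(nn.Module):
    def __init__(self, cross_encoder: nn.Module, d: int = 768):
        super().__init__()
        self.cross_encoder = cross_encoder   # placeholder: (q, d) -> h_{q,d} in R^d, Eq. (9)
        self.mlp2 = nn.Linear(d, d)          # MLP_2: projects e_u into R^d
        self.mlp3 = nn.Linear(2 * d, 1)      # MLP_3: scores the concatenated 2d vector

    def forward(self, query: str, doc: str, e_u: torch.Tensor) -> torch.Tensor:
        h_qd = self.cross_encoder(query, doc)              # Eq. (9)
        z = torch.cat([h_qd, self.mlp2(e_u)], dim=-1)      # CONCAT in Eq. (10)
        return self.mlp3(z).squeeze(-1)                    # S^reranker_{u,q,d}
```

Concatenation is used here, rather than a cosine term as in Eq. (4), precisely because the cross-encoder never produces a standalone document embedding for e_u to be compared against.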
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 600, + 558, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 600, + 558, + 643 + ], + "spans": [ + { + "bbox": [ + 314, + 600, + 558, + 643 + ], + "type": "text", + "content": "4.3.2 Training. Similar to the retriever's training in Section 4.2.2, we also want the reranker to assign higher scores to the documents that lead to better LLM-generated results. Therefore, we train the reranker using a similar approach." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "text", + "content": "We use the trained retrieval model from Section 4.2.2 to retrieve top-" + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "text", + "content": " documents from the history of each of the " + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "text", + "content": " users, resulting in a total of " + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "inline_equation", + "content": "m \\times k" + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "text", + "content": " candidate documents. These documents are concatenated with the query " + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "text", + "content": " and used as prompts for the LLM, producing " + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "inline_equation", + "content": "m \\times k" + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "text", + "content": " outputs. Similar to Eq.(6), we can obtain the distribution " + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "inline_equation", + "content": "p_{\\mathrm{LLM}}(d|q,y)" + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "text", + "content": " of these candidate documents. Based on" + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "type": "text", + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 440, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 440, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 440, + 60, + 558, + 69 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 55, + 99, + 292, + 158 + ], + "blocks": [ + { + "bbox": [ + 67, + 83, + 277, + 95 + ], + "lines": [ + { + "bbox": [ + 67, + 83, + 277, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 83, + 277, + 95 + ], + "type": "text", + "content": "Table 1: Statistics of the datasets used in this paper." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 55, + 99, + 292, + 158 + ], + "lines": [ + { + "bbox": [ + 55, + 99, + 292, + 158 + ], + "spans": [ + { + "bbox": [ + 55, + 99, + 292, + 158 + ], + "type": "table", + "html": "
DatasetLaMP-1LaMP-2LaMP-3LaMP-4LaMP-5LaMP-7
#Users6,54292920,0001,64314,68213,437
#Train6,5425,07320,00012,50014,68213,437
#Dev1,5001,4102,5001,5001,5001,498
#Test1,5001,5572,5001,8001,5001,500
", + "image_path": "2949233b012ca5dc6bde7e95580b620544ae0db053562b8a4fa23243c5566927.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 169, + 294, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 169, + 294, + 195 + ], + "spans": [ + { + "bbox": [ + 50, + 169, + 294, + 195 + ], + "type": "inline_equation", + "content": "S_{u,q,d}^{\\mathrm{reranker}}" + }, + { + "bbox": [ + 50, + 169, + 294, + 195 + ], + "type": "text", + "content": " in Eq. (10), we can also get the score distribution of the candidate documents by the reranker:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 200, + 295, + 235 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 200, + 295, + 235 + ], + "spans": [ + { + "bbox": [ + 85, + 200, + 295, + 235 + ], + "type": "interline_equation", + "content": "p _ {\\text {r e r a n k e r}} \\left(d _ {i, j} \\mid q, u\\right) = \\frac {\\exp \\left(S _ {u , q , d _ {i , j}} ^ {\\text {r e r a n k e r}}\\right)}{\\sum_ {i = 1} ^ {m} \\sum_ {j = 1} ^ {k} \\exp \\left(S _ {u , q , d _ {i , j}} ^ {\\text {r e r a n k e r}}\\right)}. \\tag {11}", + "image_path": "83c00f18c2b907f8f9980e476359f97f132ed1a1ca8fa078b88d50e7d5d7f922.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 239, + 299, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 239, + 299, + 262 + ], + "spans": [ + { + "bbox": [ + 50, + 239, + 299, + 262 + ], + "type": "text", + "content": "We compute the KL divergence between distributions " + }, + { + "bbox": [ + 50, + 239, + 299, + 262 + ], + "type": "inline_equation", + "content": "p_{\\mathrm{reranker}}(d|q,u)" + }, + { + "bbox": [ + 50, + 239, + 299, + 262 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 239, + 299, + 262 + ], + "type": "inline_equation", + "content": "p_{\\mathrm{LLM}}(d|q,y)" + }, + { + "bbox": [ + 50, + 239, + 299, + 262 + ], + "type": "text", + "content": " as the loss to optimize the reranker:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 268, + 295, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 268, + 295, + 281 + ], + "spans": [ + { + "bbox": [ + 83, + 268, + 295, + 281 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {r e r a n k e r}} = \\mathrm {K L} \\left(p _ {\\text {r e r a n k e r}} (d | q, u) \\mid p _ {\\text {L L M}} (d | q, y)\\right). \\tag {12}", + "image_path": "1b4b2debe6472ddf2afda8745e85c61d5617ebc984a7385407045378024caa4a.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 285, + 295, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 285, + 295, + 308 + ], + "spans": [ + { + "bbox": [ + 50, + 285, + 295, + 308 + ], + "type": "text", + "content": "The loss allows the reranker to assign higher scores to documents that enable better personalized generation by the LLM." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 318, + 133, + 330 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 318, + 133, + 330 + ], + "spans": [ + { + "bbox": [ + 51, + 318, + 133, + 330 + ], + "type": "text", + "content": "4.4 Discussion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 333, + 295, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 333, + 295, + 443 + ], + "spans": [ + { + "bbox": [ + 50, + 333, + 295, + 443 + ], + "type": "text", + "content": "Computational Efficiency. CFRAG comprises three modules. The User Encoder is a lightweight, single-layer Transformer with inputs derived from a frozen BGE embedding (dimension 768), resulting in minimal parameter overhead. The retriever and reranker are comparable in size to BERT (approximately 100M parameters). Overall, the training cost is low due to the modest parameter size. During inference, user and document embeddings can be precomputed, requiring only similarity calculations for retrieval, ensuring minimal computational cost. This efficiency enables our method to generalize quickly to new datasets." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 454, + 134, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 454, + 134, + 466 + ], + "spans": [ + { + "bbox": [ + 51, + 454, + 134, + 466 + ], + "type": "text", + "content": "5 Experiments" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 468, + 295, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 468, + 295, + 489 + ], + "spans": [ + { + "bbox": [ + 50, + 468, + 295, + 489 + ], + "type": "text", + "content": "We conducted experiments to evaluate the performance of CFRAG. The source code is available." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 501, + 178, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 501, + 178, + 514 + ], + "spans": [ + { + "bbox": [ + 51, + 501, + 178, + 514 + ], + "type": "text", + "content": "5.1 Experimental Setup" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 515, + 295, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 515, + 295, + 635 + ], + "spans": [ + { + "bbox": [ + 50, + 515, + 295, + 635 + ], + "type": "text", + "content": "5.1.1 Dataset. We conducted experiments on the Language Model Personalization (LaMP) [32] benchmark, which consists of seven personalized text generation tasks. We excluded LaMP-6 because its data is not publicly available. The remaining tasks include: LaMP-1 (Personalized Citation Identification); LaMP-2 (Personalized Movie Tagging); LaMP-3 (Personalized Product Rating); LaMP-4 (Personalized News Headline Generation); LaMP-5 (Personalized Scholarly Title Generation); LaMP-7 (Personalized Tweet Paraphrasing). We used the time-based split provided by LaMP to divide the data into training, validation, and test sets. The statistics of these datasets are shown in Table 1." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 50, + 643, + 295, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 643, + 295, + 687 + ], + "spans": [ + { + "bbox": [ + 50, + 643, + 295, + 687 + ], + "type": "text", + "content": "5.1.2 Evaluation Metrics. 
Following previous works [31, 32], we evaluate Accuracy and F-1 score for LaMP-1 and LaMP-2, mean absolute error (MAE) and root mean squared error (RMSE) for LaMP-3, ROUGE-1 and ROUGE-L [24] for LaMP-4, LaMP-5 and LaMP-7." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 84, + 560, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 84, + 560, + 106 + ], + "spans": [ + { + "bbox": [ + 314, + 84, + 560, + 106 + ], + "type": "text", + "content": "5.1.3 Baselines. In this work, we compare CFRAG with the following methods." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 106, + 558, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 106, + 558, + 139 + ], + "spans": [ + { + "bbox": [ + 314, + 106, + 558, + 139 + ], + "type": "text", + "content": "No Personalization: We directly input the user's query into the LLM without retrieving from user history, using this as the non-personalized baseline. We refer to this method as Zero Shot." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "spans": [ + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "type": "text", + "content": "Personalized Baselines: We compared CFRAG with methods that personalize by retrieving from user history using different retrieval models, including: (1) Random selects " + }, + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "type": "text", + "content": " items randomly from the user's history; (2) Recency selects the most recent " + }, + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "type": "text", + "content": " items from the user's history; (3) BM25 [30] retrieves top- " + }, + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "type": "text", + "content": " items from the user's history using BM25; (4) BGE [45] retrieves top- " + }, + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 140, + 559, + 228 + ], + "type": "text", + "content": " items from the user's history using BGE retriever; (5) ROPG [31] optimizes the dense retrieval model based on the results generated by the LLM." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "spans": [ + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": "5.1.4 Implementation Details. We conducted experiments on two LLMs: Llama3-8B-Instruct [1] and Qwen2-7B-Instruct [47]. In this paper, we do not fine-tune the LLM because fine-tuning is costly and could cause the LLM to retain user information, potentially compromising user privacy. To ensure a fair comparison, we use greedy search for text generation. The dense retrieval model used in all methods is bge-base-en-v1.5² [45]. The cross-encoder used for reranker in Section 4.3.1 is bge-reranker-base³ [45]. All hyperparameters for the baselines are searched according to the settings in the original papers. 
The embedding dimension " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": " is set to 768. The number of retrieved documents " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": " is set to 5, and the number of retrieved users " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": " is tuned among " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "\\{2,3,4,5,6\\}" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "\\mathrm{Trm}(\\cdot)" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": " encoder in Eq. (1) has 1 layer and 2 heads. The hyperparameters " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "L_{c}, L_{m}" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "L_{r}" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": " used for data augmentation in Section 4.1.2 are set to 0.7, 0.3, and 0.3, respectively. The temperature parameters " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "\\tau_{1}" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": " in Eq. (2) is tuned among " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "\\{0.01, 0.1, 1\\}" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": ". The weight " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": " in Eq. (5) is tuned among " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "[0.01, 1.0]" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": ". The learning rate is tuned among " + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "inline_equation", + "content": "\\{1e - 3, 1e - 4, 1e - 5\\}" + }, + { + "bbox": [ + 313, + 233, + 559, + 441 + ], + "type": "text", + "content": ". Adam [18] is used to conduct the optimization. The data input and output formats are provided in Appendix A." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 450, + 450, + 463 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 450, + 450, + 463 + ], + "spans": [ + { + "bbox": [ + 315, + 450, + 450, + 463 + ], + "type": "text", + "content": "5.2 Experimental Results" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 465, + 558, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 465, + 558, + 486 + ], + "spans": [ + { + "bbox": [ + 314, + 465, + 558, + 486 + ], + "type": "text", + "content": "Experimental results are shown in Table 2. 
From the results, we can find that:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 487, + 559, + 685 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 314, + 487, + 559, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 487, + 559, + 553 + ], + "spans": [ + { + "bbox": [ + 314, + 487, + 559, + 553 + ], + "type": "text", + "content": "- Firstly, compared to existing methods, CFRAG achieved the best results across six datasets in the LaMP benchmark. This demonstrates the effectiveness of introducing collaborative information between users into RAG and using LLM feedback to tune the retriever and reranker to ensure that they can retrieve the documents that support the personalized LLM generation." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 553, + 559, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 553, + 559, + 629 + ], + "spans": [ + { + "bbox": [ + 314, + 553, + 559, + 629 + ], + "type": "text", + "content": "- Secondly, we can observe that even randomly selecting user history outperforms the zero-shot method without any user history. This highlights the importance of incorporating user history to reflect user preferences for personalized generation. Additionally, we observe that retrieval methods perform better than simply selecting the most recent user history, underscoring the importance of retrieval." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 630, + 559, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 630, + 559, + 685 + ], + "spans": [ + { + "bbox": [ + 314, + 630, + 559, + 685 + ], + "type": "text", + "content": "- Thirdly, we also observe that, in most cases, RAG and ROPG methods using dense retrieval models outperform BM25. Additionally, CFRAG, which fine-tunes the retriever based on LLM feedback, achieves better results. This shows, on the one hand, that the better the retriever, the better the generation results, and on the other" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 171, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 171, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 171, + 68 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "text", + "content": "Teng Shi et al." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 315, + 691, + 457, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 691, + 457, + 700 + ], + "spans": [ + { + "bbox": [ + 315, + 691, + 457, + 700 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 691, + 457, + 700 + ], + "type": "text", + "content": "https://huggingface.co/BAAI/bge-base-en-v1.5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 700, + 458, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 700, + 458, + 709 + ], + "spans": [ + { + "bbox": [ + 315, + 700, + 458, + 709 + ], + "type": "text", + "content": "3https://huggingface.co/BAAI/bge-eranker-base" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 52, + 700, + 175, + 709 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 175, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 175, + 709 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 52, + 700, + 175, + 709 + ], + "type": "text", + "content": "https://github.com/TengShi-RUC/CFRAG" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 134, + 552, + 278 + ], + "blocks": [ + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "lines": [ + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "spans": [ + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "text", + "content": "Table 2: Comparison of the performance of CFRAG with other approaches on the LaMP benchmark. " + }, + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "text", + "content": " indicates that a higher value for the corresponding metric is better, while " + }, + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "text", + "content": " indicates that a lower value is better. The best and the second-best methods are highlighted in bold and underlined fonts, respectively. “*” indicates improvements over the second-best methods are statistically significant (" + }, + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "text", + "content": "-test, " + }, + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "text", + "content": "-value " + }, + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "inline_equation", + "content": "< 0.05" + }, + { + "bbox": [ + 50, + 82, + 560, + 128 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 134, + 552, + 278 + ], + "lines": [ + { + "bbox": [ + 56, + 134, + 552, + 278 + ], + "spans": [ + { + "bbox": [ + 56, + 134, + 552, + 278 + ], + "type": "table", + "html": "
LLMsRetrieversLaMP-1LaMP-2LaMP-3LaMP-4LaMP-5LaMP-7
Accuracy ↑F1 ↑Accuracy ↑F1 ↑MAE ↓RMSE ↓ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑
Llama3Zero Shot0.49930.24970.29930.02000.50240.79040.14060.12280.44170.36500.30790.2593
Random0.57400.28700.39290.02620.41040.78330.17870.15710.45330.38750.31370.2508
Recency0.60400.30200.39930.02660.39800.74910.18560.16500.45730.39280.33250.2686
BM25 [30]0.62400.31200.42550.02840.40600.76660.18030.15910.46370.39780.34490.2780
BGE [45]0.63270.31630.45740.03050.35280.69690.18110.16110.46380.39580.33910.2742
ROPG [31]0.64400.32200.46810.03120.34560.69220.18380.16340.46380.39560.35300.2881
CFRAG0.6533*0.3267*0.5340*0.0356*0.2812*0.5997*0.1957*0.1745*0.4810*0.4153*0.3752*0.3055*
Qwen2Zero Shot0.50000.25000.29080.01940.44440.78050.12640.10810.41440.34680.39720.3229
Random0.56330.28170.32840.02190.40000.76210.15810.13770.45800.39210.42910.3564
Recency0.57730.28870.33260.02220.39120.75630.15810.13690.45620.39130.42470.3525
BM25 [30]0.59870.29930.35320.02350.42280.80270.15800.13740.46130.39500.42900.3570
BGE [45]0.60800.30400.36740.02450.36960.72110.16130.13980.45710.39100.43470.3605
ROPG [31]0.60930.30470.38300.02550.36720.73320.16170.14010.46000.39460.43450.3610
CFRAG0.61330.30670.3957*0.02640.3536*0.7071*0.16210.14120.4703*0.4029*0.4425*0.3708*
", + "image_path": "7082bea53d532e5d94a2453a6bded23a4f6f523ae049809dfc0d039c880c149f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 56, + 308, + 553, + 396 + ], + "blocks": [ + { + "bbox": [ + 50, + 281, + 560, + 304 + ], + "lines": [ + { + "bbox": [ + 50, + 281, + 560, + 304 + ], + "spans": [ + { + "bbox": [ + 50, + 281, + 560, + 304 + ], + "type": "text", + "content": "Table 3: Ablation Study of CFRAG on LaMP based on Llama3. \"MEAN\" represents using the average of user history document embeddings as the user embedding. \"w/o\" indicates the corresponding module in CFRAG is removed." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 308, + 553, + 396 + ], + "lines": [ + { + "bbox": [ + 56, + 308, + 553, + 396 + ], + "spans": [ + { + "bbox": [ + 56, + 308, + 553, + 396 + ], + "type": "table", + "html": "
VariantsLaMP-1LaMP-2LaMP-3LaMP-4LaMP-5LaMP-7
#ModelAccuracy ↑F1 ↑Accuracy ↑F1 ↑MAE ↓RMSE ↓ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑ROUGE-1 ↑ROUGE-L ↑
(0)CFRAG0.65330.32670.53400.03560.28120.59970.19570.17450.48100.41530.37520.3055
(1)w/o User Retrieval0.64000.32000.49360.03290.34440.69250.19140.16890.46420.39630.35660.2903
(2)User Retrieval (MEAN)0.64200.32100.50640.03380.34120.68670.18470.16390.47790.41130.37220.3022
(3)w/o Retriever Tuning0.64530.32270.49790.03320.28520.60700.19160.17040.47420.40480.35990.2940
(4)w/o S_{u,d}^{retriever} in Eq. (5)0.63330.31670.51130.03410.33240.68610.18950.16960.47500.40880.37320.3039
(5)w/o Reranker Tuning0.63070.31530.46950.03130.36960.73920.17660.15500.47140.40680.34320.2775
(6)w/o e_u in Eq. (10)0.63130.31570.49930.03330.34200.69250.18870.16720.47720.41230.37310.3030
", + "image_path": "4d1923b82576cd1772eb0abb7c69d9b2c56470581b24e15f43d202ce4b7dd758.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 403, + 295, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 403, + 295, + 436 + ], + "spans": [ + { + "bbox": [ + 50, + 403, + 295, + 436 + ], + "type": "text", + "content": "hand, fine-tuning the retriever based on LLM feedback to ensure it can retrieve the documents that meet the personalized generation needs of LLM is crucial." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 452, + 153, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 452, + 153, + 464 + ], + "spans": [ + { + "bbox": [ + 51, + 452, + 153, + 464 + ], + "type": "text", + "content": "5.3 Ablation Study" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 466, + 295, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 466, + 295, + 521 + ], + "spans": [ + { + "bbox": [ + 50, + 466, + 295, + 521 + ], + "type": "text", + "content": "We conducted an ablation study to investigate the effectiveness of different modules in CFRAG, as shown in Table 3. CFRAG consists of three modules: User Retrieval, Document Retrieval, and Document Rerank. We removed different modules from CFRAG one by one to verify the effectiveness of each module." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 533, + 295, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 533, + 295, + 599 + ], + "spans": [ + { + "bbox": [ + 50, + 533, + 295, + 599 + ], + "type": "text", + "content": "5.3.1 User Retrieval. First, we validated the effectiveness of introducing collaborative information by retrieving similar users, as shown in row (1) of Table 3. It can be seen that without retrieving similar users and only retrieving from the current user's history, the performance is worse than that of CFRAG, highlighting the importance of collaborative information." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 600, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 600, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 600, + 295, + 710 + ], + "type": "text", + "content": "We also validated the effectiveness of training user embeddings using contrastive learning. For comparison, we directly averaged the document embeddings from the user's history to create user embeddings for retrieval, as shown in row (2) of Table 3. It can be seen that CFRAG, which uses user embeddings trained with contrastive learning, achieves better results. This is because contrastive learning constructs user similarity labels through data augmentation and uses the InfoNCE loss to help the embeddings learn which users are similar. In contrast, using mean pooling directly cannot capture user similarity." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 319, + 405, + 434, + 475 + ], + "blocks": [ + { + "bbox": [ + 319, + 405, + 434, + 475 + ], + "lines": [ + { + "bbox": [ + 319, + 405, + 434, + 475 + ], + "spans": [ + { + "bbox": [ + 319, + 405, + 434, + 475 + ], + "type": "image", + "image_path": "94633987414c00f738c3423cbe8c6e65847b434df8ed4ae5565aeb3a005e42e8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 479, + 394, + 488 + ], + "lines": [ + { + "bbox": [ + 359, + 479, + 394, + 488 + ], + "spans": [ + { + "bbox": [ + 359, + 479, + 394, + 488 + ], + "type": "text", + "content": "(a) LaMP-1" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 440, + 405, + 555, + 475 + ], + "blocks": [ + { + "bbox": [ + 440, + 405, + 555, + 475 + ], + "lines": [ + { + "bbox": [ + 440, + 405, + 555, + 475 + ], + "spans": [ + { + "bbox": [ + 440, + 405, + 555, + 475 + ], + "type": "image", + "image_path": "79735191f069bd3fe577f6f68843cfd442708060861370a8d9a6cbcee6d995b3.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 480, + 479, + 515, + 488 + ], + "lines": [ + { + "bbox": [ + 480, + 479, + 515, + 488 + ], + "spans": [ + { + "bbox": [ + 480, + 479, + 515, + 488 + ], + "type": "text", + "content": "(b) LaMP-5" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "lines": [ + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "spans": [ + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "text", + "content": "Figure 5: Results of using different methods to select users for introducing collaborative information. \"random\" indicates randomly selecting " + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "text", + "content": " users; \"top-" + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "inline_equation", + "content": "(m - 2m)" + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "text", + "content": "\" represents selecting users whose similarity to the current user ranks between " + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "inline_equation", + "content": "2m" + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "text", + "content": "; \"top-" + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "text", + "content": "\" indicates selecting the most similar " + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 314, + 499, + 560, + 555 + ], + "type": "text", + "content": " users." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 583, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 583, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 583, + 560, + 710 + ], + "type": "text", + "content": "5.3.2 Document Retrieval. 
We also validated the effectiveness of the personalized retriever we designed, as shown in Table 3, rows (3) and (4). First, in row (3), we can see that without fine-tuning based on LLM feedback, using a pre-trained dense retrieval model leads to worse performance. This indicates that retrieval cannot be based solely on semantic relevance, ensuring that the retrieved documents support personalized LLM generation is crucial. Additionally, we analyzed the impact of removing " + }, + { + "bbox": [ + 313, + 583, + 560, + 710 + ], + "type": "inline_equation", + "content": "S_{u,d}^{\\mathrm{retriever}}" + }, + { + "bbox": [ + 313, + 583, + 560, + 710 + ], + "type": "text", + "content": " from Eq. (4) and only using " + }, + { + "bbox": [ + 313, + 583, + 560, + 710 + ], + "type": "inline_equation", + "content": "S_{q,d}^{\\mathrm{retriever}}" + }, + { + "bbox": [ + 313, + 583, + 560, + 710 + ], + "type": "text", + "content": " from Eq. (3) for retrieval, as indicated in row (4). The results decreased, demonstrating that users' personalized preferences should also be considered during retrieval, rather" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "type": "text", + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 440, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 440, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 440, + 60, + 559, + 69 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." 
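To make the scoring probed by ablation rows (3) and (4) concrete: the retrieval score combines the query-document relevance S_{q,d} of Eq. (3) with a user-preference term S_{u,d} from Eq. (4). The sketch below is one plausible reading of that combination, not the authors' code; the cosine similarities, the additive fusion, and the weight `alpha` are assumptions.

```python
import torch.nn.functional as F

def retrieval_score(q_emb, doc_embs, user_emb, alpha=1.0):
    """Score each candidate document by semantic relevance S_{q,d} plus a
    user-preference term S_{u,d}. Shapes: q_emb and user_emb are (d,),
    doc_embs is (n, d). The additive fusion and `alpha` are assumed."""
    s_qd = F.cosine_similarity(q_emb.unsqueeze(0), doc_embs, dim=-1)    # (n,)
    s_ud = F.cosine_similarity(user_emb.unsqueeze(0), doc_embs, dim=-1) # (n,)
    return s_qd + alpha * s_ud  # dropping s_ud corresponds to ablation row (4)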
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 86, + 169, + 156 + ], + "blocks": [ + { + "bbox": [ + 55, + 86, + 169, + 156 + ], + "lines": [ + { + "bbox": [ + 55, + 86, + 169, + 156 + ], + "spans": [ + { + "bbox": [ + 55, + 86, + 169, + 156 + ], + "type": "image", + "image_path": "86faea8a327cef5e2b9ac66cf7e276f3ee4ebff4fccc9a5f5db5d20515b61ee6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 96, + 160, + 130, + 169 + ], + "lines": [ + { + "bbox": [ + 96, + 160, + 130, + 169 + ], + "spans": [ + { + "bbox": [ + 96, + 160, + 130, + 169 + ], + "type": "text", + "content": "(a) LaMP-1" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 175, + 86, + 291, + 156 + ], + "blocks": [ + { + "bbox": [ + 175, + 86, + 291, + 156 + ], + "lines": [ + { + "bbox": [ + 175, + 86, + 291, + 156 + ], + "spans": [ + { + "bbox": [ + 175, + 86, + 291, + 156 + ], + "type": "image", + "image_path": "e096674f0f80370de56434a59eb14a7c664e0ab43e4a24769d974004128917ef.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 160, + 251, + 169 + ], + "lines": [ + { + "bbox": [ + 216, + 160, + 251, + 169 + ], + "spans": [ + { + "bbox": [ + 216, + 160, + 251, + 169 + ], + "type": "text", + "content": "(b) LaMP-5" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 55, + 236, + 170, + 306 + ], + "blocks": [ + { + "bbox": [ + 50, + 185, + 296, + 229 + ], + "lines": [ + { + "bbox": [ + 50, + 185, + 296, + 229 + ], + "spans": [ + { + "bbox": [ + 50, + 185, + 296, + 229 + ], + "type": "text", + "content": "Figure 6: Results using different retrievers and rerankers. \"BM25\" indicates using BM25 as both the retriever and reranker, while \"w/o Tuning\" refers to using pre-trained retrievers and rerankers without LLM feedback fine-tuning." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 236, + 170, + 306 + ], + "lines": [ + { + "bbox": [ + 55, + 236, + 170, + 306 + ], + "spans": [ + { + "bbox": [ + 55, + 236, + 170, + 306 + ], + "type": "image", + "image_path": "0fe4c17f7d3b0064ee98d9689518a2b9f9a84baea0031c9d564a3783a1b2fd6b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 96, + 310, + 130, + 319 + ], + "lines": [ + { + "bbox": [ + 96, + 310, + 130, + 319 + ], + "spans": [ + { + "bbox": [ + 96, + 310, + 130, + 319 + ], + "type": "text", + "content": "(a) LaMP-1" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 50, + 335, + 295, + 367 + ], + "lines": [ + { + "bbox": [ + 50, + 335, + 295, + 367 + ], + "spans": [ + { + "bbox": [ + 50, + 335, + 295, + 367 + ], + "type": "text", + "content": "Figure 7: Performance under different numbers of retrieved documents from the current user " + }, + { + "bbox": [ + 50, + 335, + 295, + 367 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 50, + 335, + 295, + 367 + ], + "type": "text", + "content": "'s history in the top-" + }, + { + "bbox": [ + 50, + 335, + 295, + 367 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 50, + 335, + 295, + 367 + ], + "type": "text", + "content": " documents." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 176, + 236, + 291, + 306 + ], + "blocks": [ + { + "bbox": [ + 176, + 236, + 291, + 306 + ], + "lines": [ + { + "bbox": [ + 176, + 236, + 291, + 306 + ], + "spans": [ + { + "bbox": [ + 176, + 236, + 291, + 306 + ], + "type": "image", + "image_path": "20182a7db51c3fa3024198281989434352d72122a26392fd7dce39da64ecec9d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 310, + 251, + 319 + ], + "lines": [ + { + "bbox": [ + 216, + 310, + 251, + 319 + ], + "spans": [ + { + "bbox": [ + 216, + 310, + 251, + 319 + ], + "type": "text", + "content": "(b) LaMP-5" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 376, + 295, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 376, + 295, + 398 + ], + "spans": [ + { + "bbox": [ + 50, + 376, + 295, + 398 + ], + "type": "text", + "content": "than solely focusing on the semantic relevance between the query and documents." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 403, + 295, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 403, + 295, + 505 + ], + "spans": [ + { + "bbox": [ + 50, + 403, + 295, + 505 + ], + "type": "text", + "content": "5.3.3 Document Rerank. We also validated the effectiveness of the personalized reranker we designed, as shown in Table 3, rows (5) and (6). First, in row (5), it can be seen that using a pre-trained reranker leads to worse results, highlighting the importance of fine-tuning based on LLM feedback. We also observed the effect of removing " + }, + { + "bbox": [ + 50, + 403, + 295, + 505 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_u" + }, + { + "bbox": [ + 50, + 403, + 295, + 505 + ], + "type": "text", + "content": " from Eq. (10) and only using " + }, + { + "bbox": [ + 50, + 403, + 295, + 505 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_{q,d}" + }, + { + "bbox": [ + 50, + 403, + 295, + 505 + ], + "type": "text", + "content": " to calculate " + }, + { + "bbox": [ + 50, + 403, + 295, + 505 + ], + "type": "inline_equation", + "content": "S_{q,d}^{\\text{reranker}}" + }, + { + "bbox": [ + 50, + 403, + 295, + 505 + ], + "type": "text", + "content": " for ranking, as indicated in row (6). The results decreased in this case, highlighting the importance of considering users' personalized preferences in the reranker." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 514, + 192, + 527 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 514, + 192, + 527 + ], + "spans": [ + { + "bbox": [ + 51, + 514, + 192, + 527 + ], + "type": "text", + "content": "5.4 Experimental Analysis" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 50, + 529, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 529, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 50, + 529, + 295, + 628 + ], + "type": "text", + "content": "As mentioned in Section 1, adapting collaborative filtering into personalized RAG faces two challenges. Challenge 1: How to introduce collaborative information? Challenge 2: How to retrieve documents that support personalized LLM generation? In this section, we conduct experimental analysis to further demonstrate the effectiveness of our method in addressing these two challenges. 
Additionally, we provide further analysis of the results of CFRAG and the impact of hyper-parameters. Due to space limitations, we conducted experimental analysis on the LaMP-1 and LaMP-5 datasets." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "content": "5.4.1 Effectiveness of User Retrieval using Contrastive Learning (Challenge 1). As described in Section 1, to address Challenge 1, we train user embeddings using contrastive learning to retrieve the top-" + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "content": " most similar users for introducing collaborative information. To validate the effectiveness of this approach, we compared it with randomly selecting " + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "content": " users and selecting users from top-" + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "inline_equation", + "content": "2m" + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "content": ", as shown in Figure 5. First, we can see that randomly selecting" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 320, + 87, + 434, + 156 + ], + "blocks": [ + { + "bbox": [ + 320, + 87, + 434, + 156 + ], + "lines": [ + { + "bbox": [ + 320, + 87, + 434, + 156 + ], + "spans": [ + { + "bbox": [ + 320, + 87, + 434, + 156 + ], + "type": "image", + "image_path": "9c481f693eb54eb4efbf0d5c0f2af5aa8f4d150f073794b47f3ad1f2fb237cf5.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 360, + 159, + 394, + 169 + ], + "lines": [ + { + "bbox": [ + 360, + 159, + 394, + 169 + ], + "spans": [ + { + "bbox": [ + 360, + 159, + 394, + 169 + ], + "type": "text", + "content": "(a) LaMP-1" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 440, + 86, + 555, + 156 + ], + "blocks": [ + { + "bbox": [ + 440, + 86, + 555, + 156 + ], + "lines": [ + { + "bbox": [ + 440, + 86, + 555, + 156 + ], + "spans": [ + { + "bbox": [ + 440, + 86, + 555, + 156 + ], + "type": "image", + "image_path": "acb69f3db4e448b0e25bc42ebbf3669d4e4b97072a06599b923f91b4cb5074dc.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 481, + 159, + 515, + 169 + ], + "lines": [ + { + "bbox": [ + 481, + 159, + 515, + 169 + ], + "spans": [ + { + "bbox": [ + 481, + 159, + 515, + 169 + ], + "type": "text", + "content": "(b) LaMP-5" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 319, + 230, + 433, + 300 + ], + "blocks": [ + { + "bbox": [ + 314, + 185, + 559, + 217 + ], + "lines": [ + { + "bbox": [ + 314, + 185, + 559, + 217 + ], + "spans": [ + { + "bbox": [ + 314, + 185, + 559, + 217 + ], + "type": "text", + "content": "Figure 8: Performance under different numbers of retrieved users. 
The performance is the worst since no collaborative information is introduced when " + }, + { + "bbox": [ + 314, + 185, + 559, + 217 + ], + "type": "inline_equation", + "content": "m = 1" + }, + { + "bbox": [ + 314, + 185, + 559, + 217 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 319, + 230, + 433, + 300 + ], + "lines": [ + { + "bbox": [ + 319, + 230, + 433, + 300 + ], + "spans": [ + { + "bbox": [ + 319, + 230, + 433, + 300 + ], + "type": "image", + "image_path": "83cde8aa2ad4ce4f601933f54a812e13703cb17f9d80156a716ccbf53ac36f6e.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 360, + 304, + 393, + 312 + ], + "lines": [ + { + "bbox": [ + 360, + 304, + 393, + 312 + ], + "spans": [ + { + "bbox": [ + 360, + 304, + 393, + 312 + ], + "type": "text", + "content": "(a) LaMP-1" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 314, + 328, + 559, + 351 + ], + "lines": [ + { + "bbox": [ + 314, + 328, + 559, + 351 + ], + "spans": [ + { + "bbox": [ + 314, + 328, + 559, + 351 + ], + "type": "text", + "content": "Figure 9: Performance under different numbers of retrieved documents per user." + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 440, + 230, + 555, + 300 + ], + "blocks": [ + { + "bbox": [ + 440, + 230, + 555, + 300 + ], + "lines": [ + { + "bbox": [ + 440, + 230, + 555, + 300 + ], + "spans": [ + { + "bbox": [ + 440, + 230, + 555, + 300 + ], + "type": "image", + "image_path": "208072a2dd51268962130922f22ad7a74e36f3ec865e34dc65a393a56cbe2a6c.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 481, + 304, + 515, + 313 + ], + "lines": [ + { + "bbox": [ + 481, + 304, + 515, + 313 + ], + "spans": [ + { + "bbox": [ + 481, + 304, + 515, + 313 + ], + "type": "text", + "content": "(b) LaMP-5" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "spans": [ + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "text", + "content": "users yields the worst performance, indicating that collaborative information cannot be introduced indiscriminately. Secondly, the results show that retrieving users from the range of top-" + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "inline_equation", + "content": "2m" + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "text", + "content": " performs worse than using the top-" + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "text", + "content": " users, suggesting that information from users who are more similar to the current user " + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "text", + "content": " is more important. 
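The user-retrieval step that this comparison probes — embed each user's history, train the embeddings contrastively as in Section 5.3.1, then take the top-m nearest users — can be sketched as follows. This is a minimal illustration under assumed shapes and names (the data augmentation itself is elided), not the paper's implementation.

```python
import torch
import torch.nn.functional as F

def info_nce_user_loss(view_a, view_b, temperature=0.1):
    """InfoNCE over a batch of users: view_a and view_b are (B, d)
    embeddings of two augmented views of each user's history; matching
    rows are positives, all other users in the batch act as negatives."""
    view_a = F.normalize(view_a, dim=-1)
    view_b = F.normalize(view_b, dim=-1)
    logits = view_a @ view_b.t() / temperature
    targets = torch.arange(view_a.size(0), device=view_a.device)
    return F.cross_entropy(logits, targets)

def top_m_similar_users(user_emb, all_user_embs, m=4):
    """Retrieve the m users most similar to the current user. Swapping the
    trained user_emb for a mean-pooled history embedding gives the weaker
    'User Retrieval (MEAN)' variant of Table 3."""
    sims = F.normalize(all_user_embs, dim=-1) @ F.normalize(user_emb, dim=-1)
    return sims.topk(m).indices
```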
These highlight the importance of retrieving the most similar top-" + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 313, + 366, + 559, + 443 + ], + "type": "text", + "content": " users" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 450, + 560, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 450, + 560, + 570 + ], + "spans": [ + { + "bbox": [ + 313, + 450, + 560, + 570 + ], + "type": "text", + "content": "5.4.2 Effectiveness of Document Retrieval using LLM Feedback (Challenge 2). As mentioned in Section 1, to address Challenge 2, we fine-tune the retriever and reranker using feedback from the content generated by the LLM, enabling them to retrieve documents that better meet personalized LLM generation needs. To validate its effectiveness, we compared the results with those using retrievers and rerankers without LLM feedback fine-tuning, as well as using BM25 as the retriever and reranker, as shown in Figure 6. It can be observed that CFRAG performs the best, highlighting the importance of fine-tuning with LLM feedback rather than relying solely on semantic relevance." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 313, + 578, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 578, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 578, + 560, + 710 + ], + "type": "text", + "content": "5.4.3 Impact of the Number of Documents from the Current User. To further validate that CFRAG enhances personalization by incorporating collaborative information, we observed the impact of the number of documents from the current user in the final top-" + }, + { + "bbox": [ + 313, + 578, + 560, + 710 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 578, + 560, + 710 + ], + "type": "text", + "content": " documents on the results, as shown in Figure 7. We varied the number of documents retrieved from the current user's history in the top-" + }, + { + "bbox": [ + 313, + 578, + 560, + 710 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 578, + 560, + 710 + ], + "type": "text", + "content": " documents from 0 to 5, with the remaining documents retrieved from similar users' histories. The results indicate that retrieving only from the current user's history leads to poor performance, while appropriately retrieving documents from similar users' histories significantly improves the results. This verifies the importance of incorporating collaborative information." + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 171, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 171, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 171, + 68 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 515, + 60, + 558, + 68 + ], + "type": "text", + "content": "Teng Shi et al." 
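Two further pieces from Sections 5.3.3 and 5.4.2 can likewise be sketched: the reranker score joining the cross-encoder representation h_{q,d} with the user embedding e_u (cf. Eq. (10)), and the LLM-feedback objective used to fine-tune both the retriever and the reranker. The concatenation-plus-MLP head and the REPLUG-style KL distillation are assumptions about the form, not the paper's exact equations.

```python
import torch
import torch.nn.functional as F

def reranker_score(h_qd, user_emb, head):
    """Combine the cross-encoder representation h_{q,d} of each candidate
    (shape (n, d_h)) with the user embedding e_u (shape (d_u,)) through a
    small scoring MLP `head`; omitting user_emb mirrors ablation row (6)."""
    e_u = user_emb.expand(h_qd.size(0), -1)           # broadcast to (n, d_u)
    return head(torch.cat([h_qd, e_u], dim=-1)).squeeze(-1)

def llm_feedback_loss(scores, llm_utility, tau=1.0):
    """Fine-tune the retriever/reranker from LLM feedback: align the model's
    distribution over candidates with how much each candidate improved the
    LLM's personalized output (a REPLUG-style KL distillation sketch)."""
    log_p = F.log_softmax(scores / tau, dim=-1)
    target = F.softmax(llm_utility / tau, dim=-1)
    return F.kl_div(log_p, target, reduction="batchmean")
```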
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 57, + 141, + 555, + 492 + ], + "blocks": [ + { + "bbox": [ + 50, + 82, + 560, + 138 + ], + "lines": [ + { + "bbox": [ + 50, + 82, + 560, + 138 + ], + "spans": [ + { + "bbox": [ + 50, + 82, + 560, + 138 + ], + "type": "text", + "content": "Table 4: The format of input, output, and user history for different datasets in the LaMP [32] benchmark. In the input, " + }, + { + "bbox": [ + 50, + 82, + 560, + 138 + ], + "type": "inline_equation", + "content": "\\{history_{i}\\}" + }, + { + "bbox": [ + 50, + 82, + 560, + 138 + ], + "type": "text", + "content": " will be replaced by the retrieved " + }, + { + "bbox": [ + 50, + 82, + 560, + 138 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 82, + 560, + 138 + ], + "type": "text", + "content": "-th history, and each history is represented as shown in the \"User History\" column. The other italicized text in the input is replaced with the user's input. For text generation tasks, to ensure that the LLM does not generate irrelevant information, we instruct the LLM in the input to generate in JSON format, and then we extract the LLM's prediction from the JSON-formatted output." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 57, + 141, + 555, + 492 + ], + "lines": [ + { + "bbox": [ + 57, + 141, + 555, + 492 + ], + "spans": [ + { + "bbox": [ + 57, + 141, + 555, + 492 + ], + "type": "table", + "html": "
Task | Input | Output | User History
LaMP-1 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please choose one of the following two references that is more relevant to the user's input title: [1] {reference1}; [2] {reference2}. Please just answer with “[1” or “[2” without explanation. “title”: {title}. | [1] | “title”: {title}\n“abstract”: {abstract}
LaMP-2 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please select the tag from [sci-fi, based on a book, comedy ... ] that is most relevant to the user's input description. Please just answer with the tag name without explanation. “description”: {description}; “tag”: | comedy | “description”: {description};\n“tag”: {tag}
LaMP-3 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, what is the score of the following review on a scale of 1 to 5? just answer with 1, 2, 3, 4, or 5 without further explanation. “review”: {review}; “score”: | 5 | “review”: {review}\n“score”: {score}
LaMP-4 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please generate a title for the given user's input text. Please generate it in the following format: {"title": "generated title"} without explanation, and use only English. “text”: {text}; “title”: | {“title”: Finding Happiness \nAfter Divorce - It Can Happen} | “text”: {text}\n“title”: {title}
LaMP-5 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the historical profiles provided, please generate a title for the given user's input abstract. Please generate it in the following format: {"title": "generated title"} without explanation, and use only English. “abstract”: {abstract}; “title”: | {“title”: Link-Reliability Based \nTwo-Hop Routing for \nWireless Sensor Networks.} | “abstract”: {abstract}\n“title”: {title}
LaMP-7 | The historical profiles are as follows: {history1} ... {historyk}. \nBased on the style pattern of the historical tweets provided, please paraphrase the user's input tweet without any explanation before or after it. Please generate it in the following format: {"tweet": "generated tweet"} without explanation, and use only English. “tweet”: {tweet}. | {“tweet”:lilxcutiesworld the \ndanny picture is GOOD!! \nI really like it.} | “tweet”: {tweet}
", + "image_path": "6cea494ce69dd19e4155bbac2bdb81b8052ffdd33a3f5e4661fcc323a21cbc4f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 500, + 295, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 500, + 295, + 610 + ], + "spans": [ + { + "bbox": [ + 50, + 500, + 295, + 610 + ], + "type": "text", + "content": "5.4.4 Impact of the Number of Retrieved Users. Since we enhance personalized text generation by introducing collaborative filtering, we further explored how much collaborative information to introduce, specifically the impact of the number of retrieved users on the results, as shown in Figure 8. In LaMP-1, retrieving too few or too many users leads to poorer performance, with the best results at 4 users. In LaMP-5, the performance improves as the number of users increases. This highlights the importance of introducing collaborative filtering, but it also indicates that excessive introduction can lead to decreased effectiveness." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 622, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 622, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 622, + 295, + 710 + ], + "type": "text", + "content": "5.4.5 Impact of the Number of Retrieved Documents. We also analyzed the impact of the number of retrieved documents, " + }, + { + "bbox": [ + 50, + 622, + 295, + 710 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 50, + 622, + 295, + 710 + ], + "type": "text", + "content": ", on the results, as shown in Figure 9. It can be observed that as the number of retrieved documents increases, performance improves, indicating the importance of retrieving user history to reflect user preferences for enhancing LLM-generated results. Since more documents lead to longer prompts and slower LLM generation, we chose " + }, + { + "bbox": [ + 50, + 622, + 295, + 710 + ], + "type": "inline_equation", + "content": "k = 5" + }, + { + "bbox": [ + 50, + 622, + 295, + 710 + ], + "type": "text", + "content": " for our experiments." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 315, + 499, + 391, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 499, + 391, + 510 + ], + "spans": [ + { + "bbox": [ + 315, + 499, + 391, + 510 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 514, + 560, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 560, + 634 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 560, + 634 + ], + "type": "text", + "content": "In this paper, we propose CFRAG, which adapts collaborative filtering into RAG to personalize LLMs. To introduce collaborative information without explicit user labels and retrieve documents that support personalized LLM generation, we first train user embeddings through contrastive learning to retrieve similar users. Then, we design the personalized retriever and reranker that considers user preferences during retrieval and fine-tune them using LLM feedback. The results on the Language Model Personalization (LaMP) benchmark validate the effectiveness of CFRAG. The experimental analysis also confirms the effectiveness of each module within CFRAG." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 673, + 434, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 673, + 434, + 685 + ], + "spans": [ + { + "bbox": [ + 315, + 673, + 434, + 685 + ], + "type": "text", + "content": "A Appendix: Prompts" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 687, + 560, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 687, + 560, + 709 + ], + "spans": [ + { + "bbox": [ + 314, + 687, + 560, + 709 + ], + "type": "text", + "content": "We provide detailed formats for the inputs, outputs, and user histories for the LLM across different datasets, as shown in Table 4." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 326, + 69 + ], + "type": "text", + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 440, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 440, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 440, + 60, + 559, + 69 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 97, + 295, + 710 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 56, + 97, + 294, + 113 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 97, + 294, + 113 + ], + "spans": [ + { + "bbox": [ + 56, + 97, + 294, + 113 + ], + "type": "text", + "content": "[1] AI@Meta. 2024. Llama 3 Model Card. (2024). https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 114, + 295, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 114, + 295, + 137 + ], + "spans": [ + { + "bbox": [ + 56, + 114, + 295, + 137 + ], + "type": "text", + "content": "[2] Akari Asai, Zegiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. [n.d.]. Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection. In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 137, + 294, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 137, + 294, + 161 + ], + "spans": [ + { + "bbox": [ + 56, + 137, + 294, + 161 + ], + "type": "text", + "content": "[3] Sebastian Borgeaud, Arthur Mensch, et al. 2022. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, PMLR, 2206-2240." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 162, + 294, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 162, + 294, + 184 + ], + "spans": [ + { + "bbox": [ + 56, + 162, + 294, + 184 + ], + "type": "text", + "content": "[4] Jin Chen, Zheng Liu, et al. 2024. 
When large language models meet personalization: Perspectives of challenges and opportunities. World Wide Web 27, 4 (2024), 42." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 185, + 295, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 185, + 295, + 209 + ], + "spans": [ + { + "bbox": [ + 56, + 185, + 295, + 209 + ], + "type": "text", + "content": "[5] Sunhao Dai, Ninglu Shao, et al. 2023. Uncovering chatgpt's capabilities in recommender systems. In Proceedings of the 17th ACM Conference on Recommender Systems. 1126-1132." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 209, + 294, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 209, + 294, + 249 + ], + "spans": [ + { + "bbox": [ + 56, + 209, + 294, + 249 + ], + "type": "text", + "content": "[6] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 250, + 294, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 250, + 294, + 281 + ], + "spans": [ + { + "bbox": [ + 56, + 250, + 294, + 281 + ], + "type": "text", + "content": "[7] Wenqi Fan, Yujuan Ding, Liangbo Ning, Shijie Wang, Hengyun Li, Dawei Yin, Tat-Seng Chua, and Qing Li. 2024. A Survey on RAG Meeting LLMs: Towards Retrieval-Augmented Large Language Models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 6491-6501." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 281, + 294, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 281, + 294, + 304 + ], + "spans": [ + { + "bbox": [ + 56, + 281, + 294, + 304 + ], + "type": "text", + "content": "[8] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 305, + 294, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 305, + 294, + 328 + ], + "spans": [ + { + "bbox": [ + 56, + 305, + 294, + 328 + ], + "type": "text", + "content": "[9] Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning. PMLR, 3929-3938." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 329, + 294, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 329, + 294, + 360 + ], + "spans": [ + { + "bbox": [ + 53, + 329, + 294, + 360 + ], + "type": "text", + "content": "[10] Xiangnan He, Kuan Deng, Xiang Wang, Yan Li, Yongdong Zhang, and Meng Wang. 2020. Lightgcn: Simplifying and powering graph convolution network for recommendation. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval. 639-648." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 361, + 294, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 361, + 294, + 384 + ], + "spans": [ + { + "bbox": [ + 53, + 361, + 294, + 384 + ], + "type": "text", + "content": "[11] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173-182." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 384, + 294, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 384, + 294, + 408 + ], + "spans": [ + { + "bbox": [ + 53, + 384, + 294, + 408 + ], + "type": "text", + "content": "[12] Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. [n.d.]. LoRA: Low-Rank Adaptation of Large Language Models. In International Conference on Learning Representations." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 409, + 294, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 409, + 294, + 440 + ], + "spans": [ + { + "bbox": [ + 53, + 409, + 294, + 440 + ], + "type": "text", + "content": "[13] Gautier Izacard and Edouard Grave. 2021. Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume. 874-880." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 441, + 294, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 441, + 294, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 441, + 294, + 456 + ], + "type": "text", + "content": "[14] Gautier Izacard, Patrick Lewis, et al. 2022. Few-shot learning with retrieval augmented language models. arXiv preprint arXiv:2208.03299 1, 2 (2022), 4." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 457, + 294, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 457, + 294, + 480 + ], + "spans": [ + { + "bbox": [ + 53, + 457, + 294, + 480 + ], + "type": "text", + "content": "[15] Ashish Jaiswal, Ashwin Ramesh Babu, Mohammad Zaki Zadeh, Debapriya Banerjee, and Fillia Makedon. 2020. A survey on contrastive self-supervised learning. Technologies 9, 1 (2020), 2." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 53, + 481, + 294, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 481, + 294, + 504 + ], + "spans": [ + { + "bbox": [ + 53, + 481, + 294, + 504 + ], + "type": "text", + "content": "[16] Joel Jang, Seungone Kim, et al. 2023. Personalized soups: Personalized large language model alignment via post-hoc parameter merging. arXiv preprint arXiv:2310.11564 (2023)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 505, + 294, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 505, + 294, + 528 + ], + "spans": [ + { + "bbox": [ + 53, + 505, + 294, + 528 + ], + "type": "text", + "content": "[17] Nikhil Kandpal, Haikang Deng, Adam Roberts, Eric Wallace, and Colin Raffel. 2023. Large language models struggle to learn long-tail knowledge. In International Conference on Machine Learning. PMLR, 15696-15707." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 53, + 529, + 294, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 529, + 294, + 544 + ], + "spans": [ + { + "bbox": [ + 53, + 529, + 294, + 544 + ], + "type": "text", + "content": "[18] Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 53, + 544, + 294, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 544, + 294, + 559 + ], + "spans": [ + { + "bbox": [ + 53, + 544, + 294, + 559 + ], + "type": "text", + "content": "[19] Yehuda Koren, Robert Bell, and Chris Volinsky. 2009. Matrix factorization techniques for recommender systems. Computer 42, 8 (2009), 30-37." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 53, + 560, + 294, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 560, + 294, + 583 + ], + "spans": [ + { + "bbox": [ + 53, + 560, + 294, + 583 + ], + "type": "text", + "content": "[20] Patrick Lewis, Ethan Perez, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems 33 (2020), 9459-9474." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 53, + 584, + 294, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 584, + 294, + 608 + ], + "spans": [ + { + "bbox": [ + 53, + 584, + 294, + 608 + ], + "type": "text", + "content": "[21] Cheng Li, Mingyang Zhang, Qiaozhu Mei, Yaqing Wang, Spurthi Amba Hombaiah, Yi Liang, and Michael Bendersky. 2023. Teach LLMs to Personalize-An Approach inspired by Writing Education. arXiv preprint arXiv:2308.07968 (2023)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 53, + 609, + 294, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 609, + 294, + 632 + ], + "spans": [ + { + "bbox": [ + 53, + 609, + 294, + 632 + ], + "type": "text", + "content": "[22] Junyi Li, Tianyi Tang, Wayne Xin Zhao, Jian-Yun Nie, and Ji-Rong Wen. 2024. Pre-trained language models for text generation: A survey. Comput. Surveys 56, 9 (2024), 1-39." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 53, + 632, + 294, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 632, + 294, + 647 + ], + "spans": [ + { + "bbox": [ + 53, + 632, + 294, + 647 + ], + "type": "text", + "content": "[23] Xinyu Li, Zachary C Lipton, and Liu Leqi. 2024. Personalized language modeling from personalized human feedback. arXiv preprint arXiv:2402.05133 (2024)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 53, + 648, + 294, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 648, + 294, + 663 + ], + "spans": [ + { + "bbox": [ + 53, + 648, + 294, + 663 + ], + "type": "text", + "content": "[24] Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out. 74-81." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 53, + 664, + 294, + 695 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 664, + 294, + 695 + ], + "spans": [ + { + "bbox": [ + 53, + 664, + 294, + 695 + ], + "type": "text", + "content": "[25] Xi Victoria Lin, Xilun Chen, Mingda Chen, Weijia Shi, Maria Lomeli, Richard James, Pedro Rodriguez, Jacob Kahn, Gergely Szilvasy, Mike Lewis, et al. [n.d.]. RA-DIT: Retrieval-Augmented Dual Instruction Tuning. 
In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 53, + 696, + 294, + 710 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 696, + 294, + 710 + ], + "spans": [ + { + "bbox": [ + 53, + 696, + 294, + 710 + ], + "type": "text", + "content": "[26] Yinhan Liu. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 86, + 558, + 708 + ], + "type": "list", + "angle": 0, + "index": 54, + "blocks": [ + { + "bbox": [ + 316, + 86, + 558, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 86, + 558, + 110 + ], + "spans": [ + { + "bbox": [ + 316, + 86, + 558, + 110 + ], + "type": "text", + "content": "[27] Sheshera Mysore, Zhuoran Lu, et al. 2023. Pearl: Personalizing large language model writing assistants with generation-calibrated retrievers. arXiv preprint arXiv:2311.09180 (2023)." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 111, + 558, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 111, + 558, + 126 + ], + "spans": [ + { + "bbox": [ + 317, + 111, + 558, + 126 + ], + "type": "text", + "content": "[28] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. 2018. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 127, + 558, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 127, + 558, + 158 + ], + "spans": [ + { + "bbox": [ + 317, + 127, + 558, + 158 + ], + "type": "text", + "content": "[29] Chris Richardson, Yao Zhang, Kellen Gillespie, Sudipta Kar, Arshdeep Singh, Zeynab Raeesy, Omar Zia Khan, and Abhinav Sethy. 2023. Integrating summarization and retrieval for enhanced personalization via large language models. arXiv preprint arXiv:2310.20081 (2023)." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 159, + 558, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 159, + 558, + 182 + ], + "spans": [ + { + "bbox": [ + 317, + 159, + 558, + 182 + ], + "type": "text", + "content": "[30] Stephen E Robertson, Steve Walker, Susan Jones, Micheline M Hancock-Beaulieu, Mike Gatford, et al. 1995. Okapi at TREC-3. Nist Special Publication Sp 109 (1995), 109." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 182, + 558, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 182, + 558, + 213 + ], + "spans": [ + { + "bbox": [ + 317, + 182, + 558, + 213 + ], + "type": "text", + "content": "[31] Alireza Salemi, Surya Kallumadi, and Hamed Zamani. 2024. Optimization methods for personalizing large language models through retrieval augmentation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 752-762." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 317, + 214, + 558, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 214, + 558, + 237 + ], + "spans": [ + { + "bbox": [ + 317, + 214, + 558, + 237 + ], + "type": "text", + "content": "[32] Alireza Salemi, Sheshera Mysore, Michael Bendersky, and Hamed Zamani. 2023. Lamp: When large language models meet personalization. arXiv preprint arXiv:2304.11406 (2023)." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 238, + 558, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 238, + 558, + 261 + ], + "spans": [ + { + "bbox": [ + 317, + 238, + 558, + 261 + ], + "type": "text", + "content": "[33] Chenglei Shen, Xiao Zhang, Teng Shi, Changshuo Zhang, Guofu Xie, and Jun Xu. 2024. A survey of controllable learning: Methods and applications in information retrieval. arXiv preprint arXiv:2407.06083 (2024)." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 317, + 262, + 558, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 262, + 558, + 301 + ], + "spans": [ + { + "bbox": [ + 317, + 262, + 558, + 301 + ], + "type": "text", + "content": "[34] Teng Shi, Zihua Si, Jun Xu, Xiao Zhang, Xiaoxue Zang, Kai Zheng, Dewei Leng, Yanan Niu, and Yang Song. 2024. UniSAR: Modeling User Transition Behaviors between Search and Recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1029-1039." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 302, + 558, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 302, + 558, + 342 + ], + "spans": [ + { + "bbox": [ + 317, + 302, + 558, + 342 + ], + "type": "text", + "content": "[35] Weijia Shi, Sewon Min, Michihiro Yasunaga, Minjoon Seo, Richard James, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. 2024. REPLUG: Retrieval-Augmented Black-Box Language Models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 8364-8377." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 317, + 342, + 558, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 342, + 558, + 365 + ], + "spans": [ + { + "bbox": [ + 317, + 342, + 558, + 365 + ], + "type": "text", + "content": "[36] Zhaoxuan Tan, Zheyuan Liu, and Meng Jiang. 2024. Personalized Pieces: Efficient Personalized Large Language Models through Collaborative Efforts. arXiv preprint arXiv:2406.10471 (2024)." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 317, + 366, + 558, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 366, + 558, + 389 + ], + "spans": [ + { + "bbox": [ + 317, + 366, + 558, + 389 + ], + "type": "text", + "content": "[37] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2024. Democratizing Large Language Models via Personalized Parameter-Efficient Fine-tuning. arXiv:2402.04401 [cs.CL] https://arxiv.org/abs/2402.04401" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 317, + 390, + 558, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 390, + 558, + 421 + ], + "spans": [ + { + "bbox": [ + 317, + 390, + 558, + 421 + ], + "type": "text", + "content": "[38] Jiakai Tang, Sunhao Dai, Teng Shi, Jun Xu, Xu Chen, Wen Chen, Wu Jian, and Yuning Jiang. 2025. Think Before Recommend: Unleashing the Latent Reasoning Power for Sequential Recommendation. arXiv:2503.22675 [cs.IR] https://arxiv.org/abs/2503.22675" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 317, + 422, + 558, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 422, + 558, + 437 + ], + "spans": [ + { + "bbox": [ + 317, + 422, + 558, + 437 + ], + "type": "text", + "content": "[39] A Vaswani. 2017. Attention is all you need. 
Advances in Neural Information Processing Systems (2017)." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 317, + 437, + 558, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 437, + 558, + 461 + ], + "spans": [ + { + "bbox": [ + 317, + 437, + 558, + 461 + ], + "type": "text", + "content": "[40] Xiang Wang, Xiangnan He, Meng Wang, Fuli Feng, and Tat-Seng Chua. 2019. Neural graph collaborative filtering. In Proceedings of the 42nd international ACM SIGIR conference on Research and development in Information Retrieval. 165-174." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 317, + 462, + 558, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 462, + 558, + 485 + ], + "spans": [ + { + "bbox": [ + 317, + 462, + 558, + 485 + ], + "type": "text", + "content": "[41] Likang Wu, Zhi Zheng, Zhaopeng Qiu, Hao Wang, Hongchao Gu, Tingjia Shen, Chuan Qin, Chen Zhu, Hengshu Zhu, Qi Liu, et al. 2024. A survey on large language models for recommendation. World Wide Web 27, 5 (2024), 60." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 317, + 486, + 558, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 486, + 558, + 509 + ], + "spans": [ + { + "bbox": [ + 317, + 486, + 558, + 509 + ], + "type": "text", + "content": "[42] Xinghao Wu, Xuefeng Liu, Jianwei Niu, Haolin Wang, Shaojie Tang, and Guogang Zhu. 2024. FedLoRA: When Personalized Federated Learning Meets Low-Rank Adaptation. (2024)." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 317, + 510, + 558, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 510, + 558, + 540 + ], + "spans": [ + { + "bbox": [ + 317, + 510, + 558, + 540 + ], + "type": "text", + "content": "[43] Zeqiu Wu, Yushi Hu, Weijia Shi, Nouha Dziri, Alane Suhr, Prithviraj Ammanabrolu, Noah A Smith, Mari Ostendorf, and Hannaneh Hajishirzi. 2024. Fine-grained human feedback gives better rewards for language model training. Advances in Neural Information Processing Systems 36 (2024)." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 317, + 541, + 558, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 541, + 558, + 564 + ], + "spans": [ + { + "bbox": [ + 317, + 541, + 558, + 564 + ], + "type": "text", + "content": "[44] Zhuofeng Wu, Sinong Wang, Jiatao Gu, Madian Khabsa, Fei Sun, and Hao Ma. 2020. Clear: Contrastive learning for sentence representation. arXiv preprint arXiv:2012.15466 (2020)." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 317, + 565, + 558, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 565, + 558, + 589 + ], + "spans": [ + { + "bbox": [ + 317, + 565, + 558, + 589 + ], + "type": "text", + "content": "[45] Shitao Xiao, Zheng Liu, Peitian Zhang, and Niklas Muennighoff. 2023. C-Pack: Packaged Resources To Advance General Chinese Embedding. arXiv:2309.07597 [cs.CL]" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 317, + 590, + 558, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 590, + 558, + 612 + ], + "spans": [ + { + "bbox": [ + 317, + 590, + 558, + 612 + ], + "type": "text", + "content": "[46] Hong-Jian Xue, Xinyu Dai, Jianbing Zhang, Shujian Huang, and Jiajun Chen. 2017. Deep matrix factorization models for recommender systems.. In IJCAI, Vol. 17. Melbourne, Australia, 3203-3209." 
+ } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 317, + 613, + 558, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 613, + 558, + 636 + ], + "spans": [ + { + "bbox": [ + 317, + 613, + 558, + 636 + ], + "type": "text", + "content": "[47] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Cheng-peng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671 (2024)." + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 317, + 637, + 558, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 637, + 558, + 661 + ], + "spans": [ + { + "bbox": [ + 317, + 637, + 558, + 661 + ], + "type": "text", + "content": "[48] Changshuo Zhang, Teng Shi, Xiao Zhang, Qi Liu, Ruobing Xie, Jun Xu, and Ji-Rong Wen. 2024. Modeling Domain and Feedback Transitions for Cross-Domain Sequential Recommendation. arXiv preprint arXiv:2408.08209 (2024)." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 317, + 662, + 558, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 662, + 558, + 685 + ], + "spans": [ + { + "bbox": [ + 317, + 662, + 558, + 685 + ], + "type": "text", + "content": "[49] Changshuo Zhang, Teng Shi, Xiao Zhang, Yanping Zheng, Ruobing Xie, Qi Liu, Jun Xu, and Ji-Rong Wen. 2024. QAGCF: Graph Collaborative Filtering for Q&A Recommendation. arXiv preprint arXiv:2406.04828 (2024)." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 317, + 685, + 558, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 685, + 558, + 708 + ], + "spans": [ + { + "bbox": [ + 317, + 685, + 558, + 708 + ], + "type": "text", + "content": "[50] Changshuo Zhang, Xiao Zhang, Teng Shi, Jun Xu, and Ji-Rong Wen. 2025. Test-Time Alignment for Tracking User Interest Shifts in Sequential Recommendation. arXiv:2504.01489 [cs.IR] https://arxiv.org/abs/2504.01489" + } + ] + } + ], + "index": 53 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 171, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 171, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 171, + 69 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 515, + 60, + 558, + 69 + ], + "type": "text", + "content": "Teng Shi et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 86, + 296, + 175 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 52, + 86, + 296, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 86, + 296, + 119 + ], + "spans": [ + { + "bbox": [ + 52, + 86, + 296, + 119 + ], + "type": "text", + "content": "[51] Kepu Zhang, Teng Shi, Sunhao Dai, Xiao Zhang, Yinfeng Li, Jing Lu, Xiaoxue Zang, Yang Song, and Jun Xu. 2024. SAQRec: Aligning Recommender Systems to User Satisfaction via Questionnaire Feedback. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3165-3175." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 119, + 296, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 119, + 296, + 144 + ], + "spans": [ + { + "bbox": [ + 53, + 119, + 296, + 144 + ], + "type": "text", + "content": "[52] Xiao Zhang, Teng Shi, Jun Xu, Zhenhua Dong, and Ji-Rong Wen. 2024. Model-Agnostic Causal Embedding Learning for Counterfactually Group-Fair Recommendation. IEEE Transactions on Knowledge and Data Engineering (2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 144, + 294, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 144, + 294, + 175 + ], + "spans": [ + { + "bbox": [ + 53, + 144, + 294, + 175 + ], + "type": "text", + "content": "[53] Yue Zhang, Yafu Li, Leyang Cui, Deng Cai, Lemao Liu, Tingchen Fu, Xinting Huang, Enbo Zhao, Yu Zhang, Yulong Chen, et al. 2023. Siren's song in the AI ocean: a survey on hallucination in large language models. arXiv preprint arXiv:2309.01219 (2023)." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 86, + 559, + 182 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 316, + 86, + 558, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 86, + 558, + 110 + ], + "spans": [ + { + "bbox": [ + 316, + 86, + 558, + 110 + ], + "type": "text", + "content": "[54] Wayne Xin Zhao, Jing Liu, Ruiyang Ren, and Ji-Rong Wen. 2024. Dense text retrieval based on pretrained language models: A survey. ACM Transactions on Information Systems 42, 4 (2024), 1-60." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 111, + 559, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 111, + 559, + 135 + ], + "spans": [ + { + "bbox": [ + 317, + 111, + 559, + 135 + ], + "type": "text", + "content": "[55] Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 135, + 559, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 135, + 559, + 159 + ], + "spans": [ + { + "bbox": [ + 316, + 135, + 559, + 159 + ], + "type": "text", + "content": "[56] Yutao Zhu, Huaying Yuan, Shuting Wang, Jiongnan Liu, Wenhan Liu, Chenlong Deng, Haonan Chen, Zhicheng Dou, and Ji-Rong Wen. 2023. Large language models for information retrieval: A survey. arXiv preprint arXiv:2308.07107 (2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 316, + 159, + 559, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 159, + 559, + 182 + ], + "spans": [ + { + "bbox": [ + 316, + 159, + 559, + 182 + ], + "type": "text", + "content": "[57] Yuchen Zhuang, Haotian Sun, Yue Yu, Qifan Wang, Chao Zhang, and Bo Dai. 2024. HYDRA: Model Factorization Framework for Black-Box LLM Personalization. arXiv preprint arXiv:2406.02888 (2024)." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 326, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 326, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 326, + 69 + ], + "type": "text", + "content": "Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 440, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 440, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 440, + 60, + 558, + 69 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_content_list.json b/data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..67ffa72f7c5f86746c49c8383d85e117c4d4851e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_content_list.json @@ -0,0 +1,2564 @@ +[ + { + "type": "text", + "text": "DDT: Decoupled Diffusion Transformer", + "text_level": 1, + "bbox": [ + 294, + 130, + 702, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shuai Wang1 Zhi Tian2 Weilin Huang2 Limin Wang1, * \n1Nanjing University 2ByteDance Seed Vision \nhttps://github.com/MCG-NJU/DDT", + "bbox": [ + 218, + 178, + 777, + 243 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0b0fcf7f95fe250a8fe6bbf44a1d260920fff8418690e8baac77374ffe3abe42.jpg", + "image_caption": [ + "(a) Our Decoupled Diffusion Transformer" + ], + "image_footnote": [], + "bbox": [ + 116, + 266, + 352, + 462 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/48dcf09eaef0520cfce1c2c76b2f54623920e1af72057912b2c72e38de56a625.jpg", + "image_caption": [ + "(b) Conventional Diffusion Transformer" + ], + "image_footnote": [], + "bbox": [ + 359, + 267, + 566, + 465 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/fc9e0189f4b580f93b7b09e6dc65626e313ee2035d702444275ea38c70f2bfb6.jpg", + "image_caption": [ + "(c) FID compared with Other Diffusion Models", + "Figure 1. Our decoupled diffusion transformer (DDT-XL/2) achieves a SoTA 1.31 FID under 256 epochs. Our decoupled diffusion transformer models incorporate a condition encoder to extract semantic self-conditions and a velocity decoder to decode velocity." + ], + "image_footnote": [], + "bbox": [ + 586, + 268, + 893, + 450 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 525, + 326, + 541 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Diffusion transformers have demonstrated remarkable generation quality, albeit requiring longer training iterations and numerous inference steps. In each denoising step, diffusion transformers encode the noisy inputs to extract the lower-frequency semantic component and then decode the higher frequency with identical modules. This scheme creates an inherent optimization dilemma: encoding low-frequency semantics necessitates reducing high-frequency components, creating tension between semantic encoding and high-frequency decoding. 
To resolve this challenge, we propose a new Decoupled Diffusion Transformer (DDT), with a decoupled design of a dedicated condition encoder for semantic extraction alongside a specialized velocity decoder. Our experiments reveal that a more substantial encoder yields performance improvements as model size increases. For ImageNet $256 \\times 256$ , Our DDT-XL/2 achieves a new state-of-the-art performance of 1.31 FID (nearly $4 \\times$ faster training convergence compared to previous diffusion transformers). For ImageNet $512 \\times 512$ , Our DDT-XL/2 achieves a new state-of-the-art FID of 1.28. Additionally, as a beneficial by-product, our decoupled architecture enhances inference speed by enabling the sharing self", + "bbox": [ + 86, + 568, + 483, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "condition between adjacent denoising steps. To minimize performance degradation, we propose a novel statistical dynamic programming approach to identify optimal sharing strategies.", + "bbox": [ + 511, + 527, + 906, + 588 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 513, + 635, + 643, + 651 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Image generation is a fundamental task in computer vision research, which aims at capturing the inherent data distribution of original image datasets and generating high-quality synthetic images through distribution sampling. Diffusion models [19, 21, 29, 30, 41] have recently emerged as highly promising solutions to learn the underlying data distribution in image generation, outperforming the GAN-based models [3, 40] and Auto-Regressive models [5, 43, 51].", + "bbox": [ + 509, + 661, + 906, + 782 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The diffusion forward process gradually adds Gaussian noise to the pristine data following an SDE forward schedule [19, 21, 41]. The denoising process learns the score estimation from this corruption process. Once the score function is accurately learned, data samples can be synthesized by numerically solving the reverse SDE [21, 29, 30, 41].", + "bbox": [ + 511, + 782, + 908, + 876 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05741v2 [cs.CV] 9 Apr 2025", + "bbox": [ + 22, + 268, + 60, + 700 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": ": Corresponding author (lmwang@nju.edu.cn).", + "bbox": [ + 535, + 887, + 807, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Diffusion Transformers [32, 36] introduce the transformer architecture into diffusion models to replace the traditionally dominant UNet-based model [2, 10]. Empirical evidence suggests that, given sufficient training iterations, diffusion transformers outperform conventional approaches even without relying on long residual connections [36]. Nevertheless, their slow convergence rate still poses great challenge for developing new models due to the high cost.", + "bbox": [ + 89, + 90, + 480, + 210 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we want to tackle the aforementioned major disadvantages from a model design perspective. 
Classic computer vision algorithms [4, 17, 23] strategically employ encoder-decoder architectures, prioritizing large encoders for rich feature extraction and lightweight decoders for efficient inference, while contemporary diffusion models predominantly rely on conventional decoder-only structures. We systematically investigate the underexplored potential of decoupled encoder-decoder designs in diffusion transformers, asking: can a decoupled encoder-decoder transformer unlock accelerated convergence and enhanced sample quality?", + "bbox": [ + 89, + 214, + 480, + 396 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Through investigative experiments, we conclude that the plain diffusion transformer faces an optimization dilemma between abstract structural information extraction and detailed appearance information recovery. Furthermore, the diffusion transformer is limited in extracting semantic representations due to its raw-pixel supervision [28, 52, 53]. To address this issue, we propose a new architecture that explicitly decouples low-frequency semantic encoding and high-frequency detail decoding through a customized encoder-decoder design. We call this encoder-decoder diffusion transformer model DDT (Decoupled Diffusion Transformer). DDT incorporates a condition encoder to extract semantic self-condition features. The extracted self-condition is fed into a velocity decoder along with the noisy latent to regress the velocity field. To maintain the local consistency of self-condition features across adjacent steps, we employ direct supervision via representation alignment and indirect supervision from the velocity regression loss of the decoder.", + "bbox": [ + 89, + 397, + 482, + 670 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "On the ImageNet $256 \\times 256$ dataset, using the traditional off-the-shelf VAE [38], our decoupled diffusion transformer (DDT-XL/2) model achieves state-of-the-art performance of 1.31 FID with interval guidance under only 256 epochs, approximately a $4 \\times$ training acceleration compared to REPA [52]. On the ImageNet $512 \\times 512$ dataset, our DDT-XL/2 model achieves 1.28 FID within 500K fine-tuning steps.", + "bbox": [ + 89, + 671, + 480, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Furthermore, our DDT achieves strong local consistency in its self-condition feature from the encoder. This property can significantly boost inference speed by sharing the self-condition between adjacent steps. We formulate the search for the optimal encoder sharing strategy as a classic minimal-sum-path problem, minimizing the performance drop caused by sharing the self-condition among adjacent steps. We propose", + "bbox": [ + 89, + 795, + 480, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "a statistical dynamic programming approach to find the optimal encoder sharing strategy at a negligible, second-level time cost. Compared with naive uniform sharing, our dynamic programming delivers a minimal FID drop. 
Our contributions are summarized as follows.", + "bbox": [ + 511, + 90, + 903, + 165 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a new decoupled diffusion transformer model, which consists of a condition encoder and a velocity decoder.", + "- We propose statistical dynamic programming to find the optimal self-condition sharing strategy, boosting inference speed while incurring minimal performance degradation.", + "- On the ImageNet $256 \\times 256$ dataset, using the traditional SD-f8d4 VAE, our decoupled diffusion transformer (DDT-XL/2) model achieves the SoTA 1.31 FID with interval guidance under only 256 epochs, approximately a $4 \\times$ training acceleration compared to REPA [52].", + "- On the ImageNet $512 \\times 512$ dataset, our DDT-XL/2 model achieves the SoTA 1.28 FID, outperforming all previous methods by a significant margin." + ], + "bbox": [ + 511, + 167, + 903, + 393 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 513, + 411, + 653, + 426 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Diffusion Transformers. The pioneering work of DiT [36] introduced transformers into diffusion models to replace the traditionally dominant UNet architecture [2, 10]. Empirical evidence demonstrates that, given sufficient training iterations, diffusion transformers outperform conventional approaches even without relying on long residual connections. SiT [32] further validated the transformer architecture with linear flow diffusion. Following the simplicity and scalability of the diffusion transformer [32, 36], SD3 [12], Lumina [54], and PixArt [6, 7] introduced the diffusion transformer to more advanced text-to-image areas. More recently, diffusion transformers have dominated the text-to-video area with substantially improved visual and motion quality [1, 20, 24]. Our decoupled diffusion transformer (DDT) presents a new variant within the diffusion transformer family. It achieves faster convergence by decoupling the low-frequency encoding and the high-frequency decoding.", + "bbox": [ + 511, + 438, + 906, + 710 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Fast Diffusion Training. To improve the training efficiency of diffusion transformers, recent advances have pursued multi-faceted optimizations. Operator-centric approaches [13, 45, 48, 49] leverage efficient attention mechanisms: linear-attention variants [13, 45, 49] reduced quadratic complexity to speed up training, while sparse-attention architectures [48] prioritized sparsely relevant token interactions. Resampling approaches [12, 16] proposed lognorm sampling [12] or loss reweighting [16] techniques to stabilize training dynamics. 
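As an aside, the lognorm sampling referenced above admits a very small sketch. This is an illustrative reading of the SD3-style logit-normal timestep sampler with assumed defaults m = 0 and s = 1; it is not code from [12].

```python
import torch

def lognorm_timesteps(batch_size: int, m: float = 0.0, s: float = 1.0) -> torch.Tensor:
    # Logit-normal ("lognorm") timestep sampling: draw n ~ N(m, s^2) and
    # squash it through a sigmoid, which concentrates training timesteps
    # around the middle of the schedule instead of sampling t uniformly.
    n = m + s * torch.randn(batch_size)
    return torch.sigmoid(n)
```

Timesteps drawn this way concentrate the training signal at intermediate noise levels, which [12] found to stabilize training dynamics.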
Representation learning enhancement approaches integrate external inductive biases:", + "bbox": [ + 511, + 734, + 908, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/90bb434a635dade7122ba293d4cd70c1b69caa74dee31e00757ff64653720253.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 96, + 90, + 295, + 244 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4205adae321fff2cd11e1a6112d2e8212e500eb09240ca6578d94e76fa6abbc7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 90, + 496, + 244 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/8dd7b645cf3beb6c96a28f5e0aef4fa66291c343736d43a91f44bcb8296a057a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 90, + 697, + 244 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/98ee0e4b3bd272e2bf413b0c1ce26416c41201ccc216b837c3587047d88462d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 699, + 90, + 901, + 244 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5b111d9945677c3832d9ac16cd82db57d76f31a097ccf022f7c4a6509822f31f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 246, + 196, + 323 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/240994ca06ad9c15a377423cda559735d73e4fe5a793b86a2070d7f7a5a19be9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 196, + 246, + 295, + 323 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/3fcb27ffe48aea1ccedcd25a827cfac28fb0c50ec36b20a02dd8d0867f62749c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 246, + 397, + 323 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1848d6f6c6f09a80d9580fe7a0472334470d7402a8f0f9ebc75f0e54f77cd2c0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 397, + 246, + 496, + 323 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/d525d165270ddca02ea8d8ee5b095dd81e4b95d3eea8b81199d47438d2761268.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 246, + 598, + 323 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/8f5b86f4867d6ab8b42c2472d952666b81efbbf0cd189ee083b33702d9a4da62.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 598, + 246, + 699, + 323 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a9a9a05c460fbfeb08572fc2b2eba0ff359e304716e9d7dfb2b8424d8103dd3e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 699, + 246, + 799, + 323 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/844ef683faa78a606fedc9e3b46065adc7dfdff3ac5e76d82e994d3fa16edd1f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 799, + 246, + 901, + 323 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/aaea39edf7b8250bfea02aa21ada7ea2b7adefa9180fb407fa2fe28f0a5ac9d0.jpg", + "image_caption": [ + "Figure 2. Selected $256 \\times 256$ and $512 \\times 512$ resolution samples. Generated from DDT-XL/2 trained on ImageNet $256 \\times 256$ resolution and ImageNet $512 \\times 512$ resolution with CFG = 4.0." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 324, + 196, + 402 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/3eb3bb847dbdbe1a2a47a22afa09e43d4aa5684a5ada149fd7f1037369596351.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 196, + 324, + 295, + 402 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/bfb0f79f9a742e80bcaf710e1e90883e1635dc632020949ed444d444d088c5f7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 324, + 397, + 402 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f5b1120ec986332b1b040332bca193bfbb048be5c316f804ed609cd977122e83.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 397, + 324, + 496, + 402 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1938f29b7db82456a19fe9cf4e7e990304681543c87d5423f264908aa8700184.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 324, + 598, + 402 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/99a12143a9ba00a6686a3a5f3c2d8ab84698fe37bec78394975ec8ead2901f90.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 324, + 699, + 402 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/0af46efc8109063459d92e3ac952af1d0db4c1f4374abc2379210c2a8158df0c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 699, + 324, + 799, + 402 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/9a59fb5c4833508f282db8aecf20b1a86b1c6cb5a7ed19663e45728fc0e7c7f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 799, + 324, + 901, + 402 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4e983e2fbbacb92b9637d32f798ca1cd2a3d14d6e611445b59a01bd33e0f2f41.jpg", + "image_caption": [ + "Figure 3. The reverse-SDE process (generation) of SiT-XL/2 in $x$ space. There is a clear generation process from low frequency to high frequency. Most of the time is spent on generating high-frequency details (from $t = 0.4$ to $t = 1.0$ )." + ], + "image_footnote": [], + "bbox": [ + 93, + 458, + 480, + 607 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "REPA [52], RCG [27] and DoD [53] borrowed vision-specific priors into diffusion training, while masked modeling techniques [14, 15] strengthened spatial reasoning by enforcing structured feature completion during denoising. Collectively, these strategies address computational, sampling, and representational bottlenecks.", + "bbox": [ + 89, + 691, + 482, + 782 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Preliminary Analysis", + "text_level": 1, + "bbox": [ + 89, + 797, + 290, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Linear-based flow matching [29, 30, 32] represents a specialized family of diffusion models that we focus on as our primary analytical subject due to its simplicity and efficiency. For the convenience of discussion, in certain situations, diffusion and flow-matching will be used interchange-", + "bbox": [ + 89, + 824, + 483, + 902 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6ff463028d0d43933dd6eb9fe5122b58039284839b9e9f78028cca72c059078a.jpg", + "image_caption": [ + "Figure 4. The FID50K metric of SiT-XL/2 for different timeshift values. We employ a 2-nd order Adams-like solver to collect the performance. Allocating more computation at noisy steps significantly improves the performance." 
+ ], + "image_footnote": [], + "bbox": [ + 517, + 454, + 712, + 569 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f15e35020f438296ec0f20c07856070badcae024af68719216a172947aa89de8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 712, + 455, + 901, + 569 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ably. In this framework, $t = 0$ corresponds to the pure noise timestep.", + "bbox": [ + 511, + 645, + 905, + 675 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As illustrated in Fig. 3, diffusion models perform autoregressive refinement on spectral components [11, 37]. The diffusion transformer encodes the noisy latent to capture lower-frequency semantics before decoding higher-frequency details. However, this semantic encoding process inevitably attenuates high-frequency information, creating an optimization dilemma. This observation motivates our proposal to decouple the conventional decoder-only diffusion transformer into an explicit encoder-decoder architecture.", + "bbox": [ + 511, + 680, + 906, + 830 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Lemma 1. For a linear flow-matching noise scheduler at timestep $t$, let us denote $K_{\\text{freq}}$ as the maximum frequency of the clean data $\\mathbf{x}_{\\text{data}}$. The maximum retained frequency", + "bbox": [ + 511, + 854, + 908, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "in the noisy latent satisfies:", + "bbox": [ + 89, + 90, + 272, + 104 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nf_{\\max}(t) > \\min \\left( \\left( \\frac{t}{1 - t} \\right)^{2}, K_{\\text{freq}} \\right). \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 155, + 114, + 482, + 157 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Lemma 1 is borrowed directly from [11, 37]; we place its proof in the Appendix. According to Lemma 1, as $t$ increases toward less noisy timesteps, semantic encoding becomes easier (due to noise reduction) while decoding complexity increases (as residual frequencies grow). Consider the worst-case scenario: at denoising step $t$, the diffusion transformer encodes frequencies up to $f_{\\max}(t)$; to progress to step $s$, it must decode a residual frequency of at least $f_{\\max}(s) - f_{\\max}(t)$. Failure to decode these residual frequencies at step $t$ creates a critical bottleneck for progression to subsequent steps. From this perspective, if allocating more computation to noisier timesteps leads to an improvement, it means that diffusion transformers struggle to encode lower frequencies to provide semantics. Conversely, if allocating more computation to less noisy timesteps leads to an improvement, it means that flow-matching transformers struggle to decode higher frequencies to provide fine details.", + "bbox": [ + 89, + 167, + 483, + 439 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To identify the bottlenecks of current diffusion models, we conducted a targeted experiment using SiT-XL/2 with a second-order Adams-like linear multistep solver. As shown in Fig. 4, by varying the time-shift values, we demonstrate that allocating more computation to early timesteps improves final performance compared to uniform scheduling. This reveals that diffusion models face challenges at the noisier steps. 
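To make the bound of Eq. (1) concrete, evaluating $(t/(1-t))^2$ at a few timesteps gives (a worked example; the specific values of $K_{\text{freq}}$ are left abstract):

$$
f_{\max}(0.5) > \min\big(1,\, K_{\text{freq}}\big), \qquad
f_{\max}(0.8) > \min\big(16,\, K_{\text{freq}}\big), \qquad
f_{\max}(0.9) > \min\big(81,\, K_{\text{freq}}\big),
$$

so nearly all of the spectrum above the lowest frequencies is only introduced late in the schedule, consistent with the time-shift experiment above.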
This leads to a key conclusion: Current diffusion transformers are fundamentally constrained by their low-frequency semantic encoding capacity. This insight motivates the exploration of encoder-decoder architectures with strategic encoder parameter allocation.", + "bbox": [ + 89, + 439, + 483, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Prior research further supports this perspective. While lightweight diffusion MLP heads demonstrate limited decoding capacity, MAR [28] overcomes this limitation through semantic latents produced by its masked backbones, enabling high-quality image generation. Similarly, REPA [52] enhances low-frequency encoding through alignment with pre-trained vision foundations [35].", + "bbox": [ + 89, + 635, + 483, + 742 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Method", + "text_level": 1, + "bbox": [ + 89, + 753, + 181, + 768 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our decoupled diffusion transformer architecture comprises a condition encoder and a velocity decoder. The condition encoder extracts the low-frequency component from the noisy input, class label, and timestep to serve as a self-condition for the velocity decoder; the velocity decoder processes the noisy latent with the self-condition to regress the high-frequency velocity. We train this model using the established linear flow diffusion framework. For brevity,", + "bbox": [ + 89, + 779, + 483, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "we designate our model as DDT (Decoupled Diffusion Transformer).", + "bbox": [ + 511, + 90, + 906, + 121 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Condition Encoder", + "text_level": 1, + "bbox": [ + 511, + 130, + 696, + 143 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The condition encoder mirrors the architectural design and input structure of DiT/SiT with an improved micro-design. It is built with interleaved Attention and FFN blocks, without long residual connections. The encoder processes three inputs, the noisy latent $\\boldsymbol{x}_t$, timestep $t$, and class label $y$, to extract the self-condition feature $\\boldsymbol{z}_t$ through a series of stacked Attention and FFN blocks:", + "bbox": [ + 511, + 152, + 906, + 257 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol{z}_{t} = \\operatorname{Encoder} \\left( \\boldsymbol{x}_{t}, t, y \\right). \\tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 624, + 268, + 906, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, the noisy latent $\\boldsymbol{x}_t$ is patched into continuous tokens and then fed through the aforementioned encoder blocks to extract the self-condition $\\boldsymbol{z}_t$. The timestep $t$ and class label $y$ serve as external-conditioning information projected into embeddings. These external-condition embeddings are progressively injected into the encoded features of $\\boldsymbol{x}_t$ using AdaLN-Zero [36] within each encoder block.", + "bbox": [ + 511, + 296, + 905, + 400 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To maintain local consistency of $z_{t}$ across adjacent timesteps, we adopt the representation alignment technique from REPA [52]. As shown in Eq. (3), this method aligns the intermediate feature $\\mathbf{h}_i$ from the $i$-th layer of the self-mapping encoder with the DINOv2 representation $r_*$. 
Consistent with REPA [52], $h_{\\phi}$ is the learnable projection MLP:", + "bbox": [ + 511, + 402, + 905, + 505 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{enc} = 1 - \\cos \\left( r_{*}, h_{\\phi} \\left( \\mathbf{h}_{i} \\right) \\right). \\tag{3}\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 507, + 903, + 525 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This simple regularization accelerates training convergence, as shown in REPA [52], and facilitates local consistency of $\\boldsymbol{z}_t$ between adjacent steps. It allows sharing the self-condition $\\boldsymbol{z}_t$ produced by the encoder between adjacent steps. Our experiments demonstrate that this encoder-sharing strategy significantly enhances inference efficiency with only negligible performance degradation.", + "bbox": [ + 511, + 529, + 905, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Additionally, the encoder also receives indirect supervision from the decoder, which we elaborate on later.", + "bbox": [ + 511, + 635, + 905, + 665 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Velocity Decoder", + "text_level": 1, + "bbox": [ + 511, + 674, + 679, + 689 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The velocity decoder adopts the same architectural design as the condition encoder and consists of several stacked, interleaved Attention and FFN blocks, akin to DiT/SiT. It takes the noisy latent $\\boldsymbol{x}_t$, timestep $t$, and self-condition $\\boldsymbol{z}_t$ as inputs to estimate the velocity $\\boldsymbol{v}_t$. Unlike the encoder, we assume that class label information is already embedded within $\\boldsymbol{z}_t$. Thus, only the external-condition timestep $t$ and self-condition feature $\\boldsymbol{z}_t$ are used as condition inputs for the decoder blocks:", + "bbox": [ + 511, + 696, + 906, + 830 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol{v}_{t} = \\operatorname{Decoder} \\left( \\boldsymbol{x}_{t}, t, \\boldsymbol{z}_{t} \\right). \\tag{4}\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 843, + 903, + 859 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As demonstrated previously, to further improve the consistency of the self-condition $z_{t}$ between adjacent steps, we employ", + "bbox": [ + 511, + 869, + 905, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "AdaLN-Zero [36] to inject $\\mathbf{z}_t$ into the decoder feature. The decoder is trained with the flow-matching loss shown in Eq. (5):", + "bbox": [ + 89, + 90, + 485, + 137 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{dec} = \\mathbb{E} \\left[ \\int_{0}^{1} \\left\\| \\left( \\boldsymbol{x}_{\\text{data}} - \\epsilon \\right) - \\boldsymbol{v}_{t} \\right\\|^{2} \\, \\mathrm{d}t \\right]. \\tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 155, + 146, + 482, + 181 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Sampling acceleration", + "text_level": 1, + "bbox": [ + 89, + 188, + 297, + 205 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "By incorporating explicit representation alignment into the encoder and implicit self-condition injection into the decoder, we achieve local consistency of $z_{t}$ across adjacent steps during training (shown in Fig. 5). 
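Putting Eqs. (2)-(5) together, one training step can be sketched as follows. This is a minimal PyTorch-style sketch under our notational assumptions: the module names encoder, decoder, and proj_mlp, the encoder returning its i-th layer feature, and the balancing weight lam are illustrative, not the released implementation.

```python
import torch
import torch.nn.functional as F

def ddt_training_step(encoder, decoder, proj_mlp, x_data, y, r_star, lam=0.5):
    # Linear flow-matching path with t = 0 as pure noise (Sec. 3):
    # x_t = t * x_data + (1 - t) * eps, so the target velocity is x_data - eps.
    t = torch.rand(x_data.size(0), device=x_data.device)
    eps = torch.randn_like(x_data)
    t_ = t.view(-1, *([1] * (x_data.dim() - 1)))
    x_t = t_ * x_data + (1.0 - t_) * eps
    z_t, h_i = encoder(x_t, t, y)        # Eq. (2): self-condition + i-th layer feature
    v_t = decoder(x_t, t, z_t)           # Eq. (4): velocity prediction
    loss_dec = ((x_data - eps) - v_t).pow(2).mean()                                # Eq. (5)
    loss_enc = (1.0 - F.cosine_similarity(r_star, proj_mlp(h_i), dim=-1)).mean()   # Eq. (3)
    return loss_dec + lam * loss_enc     # lam is an assumed balancing weight
```

In practice, the uniform draw of t could be swapped for the lognorm sampler sketched earlier.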
This enables us to share $z_{t}$ within a suitable local range, reducing the computational burden on the self-mapping encoder.", + "bbox": [ + 89, + 210, + 483, + 301 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Formally, given total inference steps $N$ and an encoder computation budget $K$, so that the sharing ratio is $1 - \\frac{K}{N}$, we define $\\Phi$ with $|\\Phi| = K$ as the set of timesteps where the self-condition is recalculated, as shown in Eq. (6). If the current timestep $t$ is not in $\\Phi$, we reuse the previously computed $z_{t - \\Delta t}$ as $z_t$. Otherwise, we recompute $z_t$ using the encoder and the current noisy latent $x_t$:", + "bbox": [ + 89, + 301, + 483, + 407 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol{z}_{t} = \\begin{cases} \\boldsymbol{z}_{t - \\Delta t}, & \\text{if } t \\notin \\Phi \\\\ \\operatorname{Encoder} \\left( \\boldsymbol{x}_{t}, t, y \\right), & \\text{if } t \\in \\Phi \\end{cases} \\tag{6}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 417, + 482, + 459 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Uniform Encoder Sharing. This naive approach recalculates the self-condition $z_{t}$ every $\\frac{N}{K}$ steps. Previous work, such as DeepCache [33], uses this naive handcrafted uniform $\\Phi$ set to accelerate UNet models. However, UNet models, trained solely with a denoising loss and lacking robust representation alignment, exhibit less regularized local consistency in deeper features across adjacent steps compared to our DDT model. We therefore propose a simple and elegant statistical dynamic programming algorithm to construct $\\Phi$, which, unlike the naive approaches [33], finds the optimal $\\Phi$ set.", + "bbox": [ + 89, + 474, + 483, + 642 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Statistical Dynamic Programming. We construct the statistical similarity matrix $\\mathbf{S} \\in \\mathbb{R}^{N \\times N}$ of $z_{t}$ among different steps using cosine similarity. The optimal $\\Phi$ set is the one whose total similarity cost $-\\sum_{k}^{K} \\sum_{i = \\Phi_{k}}^{\\Phi_{k + 1}} S[\\Phi_{k}, i]$ is globally minimal. This is a classic minimal-sum-path problem and can be solved by dynamic programming. As shown in Eqs. (7) and (8), we denote by $\\mathbf{C}_{i}^{k}$ the cost and by $\\mathbf{P}_{i}^{k}$ the traced path when $\\Phi_{k} = i$. The state transition from $\\mathbf{C}_{j}^{k - 1}$ to $\\mathbf{C}_{i}^{k}$ follows:", + "bbox": [ + 89, + 657, + 483, + 799 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf{C}_{i}^{k} = \\min_{j = 0}^{i} \\left\\{ \\mathbf{C}_{j}^{k - 1} - \\sum_{l = j}^{i} \\mathbf{S}[j, l] \\right\\}. \\tag{7}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 809, + 482, + 840 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf{P}_{i}^{k} = \\operatorname{argmin}_{j = 0}^{i} \\left\\{ \\mathbf{C}_{j}^{k - 1} - \\sum_{l = j}^{i} \\mathbf{S}[j, l] \\right\\}. 
\\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 151, + 840, + 482, + 862 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After obtaining the cost matrix $\\mathbf{C}$ and tracked path $\\mathbf{P}$ , the optimal $\\Phi$ can be solved by backtracking $\\mathbf{P}$ from $\\mathbf{P}_N^K$ .", + "bbox": [ + 89, + 869, + 482, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiment", + "text_level": 1, + "bbox": [ + 513, + 89, + 638, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We conduct experiments on 256x256 ImageNet datasets. The total training batch size is set to 256. Consistent with methodological approaches such as SiT [32], DiT [36], and REPA [52], we employed the Adam optimizer with a constant learning rate of 0.0001 throughout the entire training process. To ensure a fair comparative analysis, we did not use gradient clipping and learning rate warm-up techniques. Our default training infrastructure consisted of $16 \\times$ or $8 \\times$ A100 GPUs. For sampling, we take the Euler solver with 250 steps as the default choice. As for the VAE, we take the off-shelf VAE-ft-EMA with a downsample factor of 8 from Huggingface1. We report FID [18], sFID [34], IS [39], Precision and Recall [25].", + "bbox": [ + 511, + 114, + 906, + 311 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Improved baselines", + "text_level": 1, + "bbox": [ + 511, + 318, + 699, + 334 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Recent architectural improvements such as SwiGLU [46, 47], RoPE [42], and RMSNorm [46, 47] have been extensively validated in the research community [8, 31, 50]. Additionally, lognorm sampling [12] has demonstrated significant benefits for training convergence. Consequently, we developed improved baseline models by incorporating these advanced techniques, drawing inspiration from recent works in the field. The performance of these improved baselines is comprehensively provided in Tab. 2. To validate the reliability of our implementation, we also reproduced the results for REPA-B/2, achieving metrics that marginally exceed those originally reported in the REPA[52]. These reproduction results provide additional confidence in the robustness of our approach.", + "bbox": [ + 511, + 340, + 906, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The improved baselines in our Tab. 2 consistently outperform their predecessors without REPA. However, upon implementing REPA, performance rapidly approaches a saturation point. This is particularly evident in the XL model size, where incremental technique improvements yield diminishingly small gains.", + "bbox": [ + 511, + 551, + 905, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.2. Metric comparison with baselines", + "text_level": 1, + "bbox": [ + 511, + 648, + 808, + 666 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We present the performances of different-size models at 400K training steps in Tab. 2. Our diffusion encoder-decoder transformer(DDT) family demonstrates consistent and significant improvements across various model sizes. Our DDT-B/2(8En4De) model exceeds Improved-REPA-B/2 by 2.8 FID gains. Our DDT-XL/2(22En6De) exceeds REPA-XL/2 by 1.3 FID gains. While the decoder-only diffusion transformers approach performance saturation with REPA[52], our DDT models continue to deliver superior results. The incremental technique improvements show diminishing gains, particularly in larger model sizes. 
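Before turning to the comparison below, the minimal-sum-path search of Sec. 4.3 is small enough to sketch end to end. This is a NumPy sketch under our reading of Eqs. (7)-(8) plus the backtracking step; the function name and the offline-collected similarity matrix S are illustrative assumptions, not the paper's released code.

```python
import numpy as np

def optimal_phi(S, K):
    # Choose K recomputation timesteps Phi so that the negative summed
    # similarity between every step and the step whose self-condition it
    # reuses is globally minimal (Eqs. (7)-(8)).
    N = S.shape[0]
    seg = np.zeros((N, N))                 # seg[j, i] = sum_{l=j..i} S[j, l]
    for j in range(N):
        seg[j, j:] = np.cumsum(S[j, j:])
    C = np.full((K + 1, N), np.inf)        # C[k, i]: best cost with Phi_k = i   (Eq. 7)
    P = np.zeros((K + 1, N), dtype=int)    # P[k, i]: argmin predecessor j       (Eq. 8)
    C[1, 0] = 0.0                          # the first recomputation is at step 0
    for k in range(2, K + 1):
        for i in range(k - 1, N):
            # Closing the previous segment [j, i-1] contributes -seg[j, i-1].
            costs = C[k - 1, :i] - seg[:i, i - 1]
            j = int(np.argmin(costs))
            C[k, i], P[k, i] = costs[j], j
    last = C[K, :] - seg[np.arange(N), N - 1]   # close the final segment at N-1
    phi = [int(np.argmin(last))]
    for k in range(K, 1, -1):                   # backtrack P from the last Phi_K
        phi.append(int(P[k, phi[-1]]))
    return sorted(phi)
```

For example, with N = 250 sampling steps and a budget of K = 33 encoder calls, the returned Φ corresponds to roughly the 0.87 sharing ratio evaluated in Tab. 4.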
However, our DDT models maintain a significant performance advantage, underscoring the effectiveness of our approach.", + "bbox": [ + 511, + 671, + 906, + 868 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "1https://huggingface.co/stabilityai/sd-vae-ft-ema", + "bbox": [ + 511, + 875, + 903, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/f252b04531ac99c22135065ab64a30e768c4a30b5909c3b088d0a7287ae98fa6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Params | Epochs | FID↓ (w/o CFG) | IS↑ | Pre.↑ | Rec.↑ | FID↓ (w/ CFG) | IS↑ | Pre.↑ | Rec.↑
MAR-B [28] | 208M | 800 | 3.48 | 192.4 | 0.78 | 0.58 | 2.31 | 281.7 | 0.82 | 0.57
CausalFusion [9] | 368M | 800 | 5.12 | 166.1 | 0.73 | 0.66 | 1.94 | 264.4 | 0.82 | 0.59
LDM-4 [38] | 400M | 170 | 10.56 | 103.5 | 0.71 | 0.62 | 3.6 | 247.7 | 0.87 | 0.48
DDT-L (Ours) | 458M | 80 | 7.98 | 128.1 | 0.68 | 0.67 | 1.64 | 310.5 | 0.81 | 0.61
MAR-L [28] | 479M | 800 | 2.6 | 221.4 | 0.79 | 0.60 | 1.78 | 296.0 | 0.81 | 0.60
VAVAE [50] | 675M | 800 | 2.17 | 205.6 | 0.77 | 0.65 | 1.35 | 295.3 | 0.79 | 0.65
CausalFusion [9] | 676M | 800 | 3.61 | 180.9 | 0.75 | 0.66 | 1.77 | 282.3 | 0.82 | 0.61
ADM [10] | 554M | 400 | 10.94 | - | 0.69 | 0.63 | 4.59 | 186.7 | 0.82 | 0.52
DiT-XL [36] | 675M | 1400 | 9.62 | 121.5 | 0.67 | 0.67 | 2.27 | 278.2 | 0.83 | 0.57
SiT-XL [32] | 675M | 1400 | 8.3 | - | - | - | 2.06 | 270.3 | 0.82 | 0.59
ViT-XL [16] | 451M | 400 | 8.10 | - | - | - | 2.06 | - | - | -
U-ViT-H/2 [2] | 501M | 400 | 6.58 | - | - | - | 2.29 | 263.9 | 0.82 | 0.57
MaskDiT [14] | 675M | 1600 | 5.69 | 178.0 | 0.74 | 0.60 | 2.28 | 276.6 | 0.80 | 0.61
FlowDCN [48] | 618M | 400 | 8.36 | 122.5 | 0.69 | 0.65 | 2.00 | 263.1 | 0.82 | 0.58
RDM [44] | 553M | / | 5.27 | 153.4 | 0.75 | 0.62 | 1.99 | 260.4 | 0.81 | 0.58
REPA [52] | 675M | 800 | 5.9 | 157.8 | 0.70 | 0.69 | 1.42 | 305.7 | 0.80 | 0.64
DDT-XL (Ours) | 675M | 80 | 6.62 | 135.2 | 0.69 | 0.67 | 1.52 | 263.7 | 0.78 | 0.63
DDT-XL (Ours) | 675M | 256 | 6.30 | 146.7 | 0.68 | 0.68 | 1.31 | 308.1 | 0.78 | 0.62
DDT-XL (Ours) | 675M | 400 | 6.27 | 154.7 | 0.68 | 0.69 | 1.26 | 310.6 | 0.79 | 0.65
", + "bbox": [ + 138, + 88, + 854, + 421 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/38214edebabe768045a7736f9e2fe165033a6bea65d8cf66547cf0b5ea3afac5.jpg", + "table_caption": [ + "Table 1. System performance comparison on ImageNet ${256} \\times {256}$ class-conditioned generation. Gray blocks mean the algorithm uses VAE trained or fine-tuned on ImageNet instead of the off-shelf SD-VAE-f8d4-ft-ema." + ], + "table_footnote": [], + "table_body": "
Model | FID↓ | sFID↓ | IS↑ | Prec.↑ | Rec.↑
SiT-B/2 [32] | 33.0 | 6.46 | 43.7 | 0.53 | 0.63
REPA-B/2 [52] | 24.4 | 6.40 | 59.9 | 0.59 | 0.65
REPA-B/2 (Reproduced) | 22.2 | 7.50 | 69.1 | 0.59 | 0.65
DDT-B/2† (8En4De) | 21.1 | 7.81 | 73.0 | 0.60 | 0.65
Improved-SiT-B/2 | 25.1 | 6.54 | 58.8 | 0.57 | 0.64
Improved-REPA-B/2 | 19.1 | 6.88 | 76.49 | 0.60 | 0.66
DDT-B/2 (8En4De) | 16.32 | 6.63 | 86.0 | 0.62 | 0.66
SiT-L/2 [32] | 18.8 | 5.29 | 72.0 | 0.64 | 0.64
REPA-L/2 [52] | 10.0 | 5.20 | 109.2 | 0.69 | 0.65
Improved-SiT-L/2 | 12.7 | 5.48 | 95.7 | 0.65 | 0.65
Improved-REPA-L/2 | 9.3 | 5.44 | 116.6 | 0.67 | 0.66
DDT-L/2 (20En4De) | 7.98 | 5.50 | 128.1 | 0.68 | 0.67
SiT-XL/2 [32] | 17.2 | 5.07 | 76.52 | 0.65 | 0.63
REPA-XL/2 [52] | 7.9 | 5.06 | 122.6 | 0.70 | 0.65
Improved-SiT-XL/2 | 10.9 | 5.3 | 103.4 | 0.66 | 0.65
Improved-REPA-XL/2 | 8.14 | 5.34 | 124.9 | 0.68 | 0.67
DDT-XL/2 (22En6De) | 6.62 | 4.86 | 135.1 | 0.69 | 0.67
", + "bbox": [ + 107, + 464, + 468, + 750 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Metrics of $400K$ training steps with different model sizes. All results are reported without classifier-free guidance. gray means metrics are copied from the original paper, otherwise it is produced by our codebase. By default, our DDT models are built on improved baselines. $\\mathrm{DDT}^{\\dagger}$ means model built on naive baseline without architecture improvement and lognorm sampling, consistent to REPA. Our DDT models consistently outperformed their counterparts.", + "bbox": [ + 89, + 760, + 482, + 872 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. System level comparison", + "text_level": 1, + "bbox": [ + 511, + 467, + 743, + 484 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ImageNet $256 \\times 256$ . We report the final metrics of DDT-XL/2 (22En6De) and DDT-L/2 (20En4De) at Tab. 1. Our DDT models demonstrate exceptional efficiency, achieving convergence in approximately $\\frac{1}{4}$ of the total epochs compared to REPA [52] and other diffusion transformer models. In order to maintain methodological consistency with REPA, we employed the classifier-free guidance with 2.0 in the interval [0.3, 1]. Our models delivered impressive results: DDT-L/2 achieved 1.64 FID, and DDT-XL/2 got 1.52 FID within just 80 epochs. By extending training to 256 epochs—still significantly more efficient than traditional 800-epoch approaches—our DDT-XL/2 established a new state-of-the-art benchmark of 1.31 FID on ImageNet $256 \\times 256$ , decisively outperforming previous diffusion transformer methodologies. To extend training to 400 epochs, our DDT-XL/2(22En6De) achieves 1.26 FID, nearly reaching the upper limit of SD-VAE-ft-EMA-f8d4, which has a 1.20 rFID on ImageNet256.", + "bbox": [ + 511, + 489, + 906, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ImageNet $512 \\times 512$ We provide the final metrics of DDT-XL/2 at Tab. 3. To validate the superiority of our DDT model, we take our DDT-XL/2 trained on ImageNet $256 \\times$ 256 under 256 epochs as the initialization, fine-tune out DDT-XL/2 on ImageNet $512 \\times 512$ for $100K$ steps. We adopt the aforementioned interval guidance [26] and we achieved a remarkable state-of-the-art performance of 1.90 FID, decisively outperforming REPA by a significant 0.28", + "bbox": [ + 511, + 779, + 908, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/c899eae3b28c35102cbbf666a0d15439efad97c88a461ea4e357d1a4523bec7a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ImageNet 512 × 512
Model | FID↓ | sFID↓ | IS↑ | Pre.↑ | Rec.↑
BigGAN-deep [3] | 8.43 | 8.13 | 177.90 | 0.88 | 0.29
StyleGAN-XL [40] | 2.41 | 4.06 | 267.75 | 0.77 | 0.52
ADM-G [10] | 7.72 | 6.57 | 172.71 | 0.87 | 0.42
ADM-G, ADM-U | 3.85 | 5.86 | 221.72 | 0.84 | 0.53
DiT-XL/2 [36] | 3.04 | 5.02 | 240.82 | 0.84 | 0.54
SiT-XL/2 [32] | 2.62 | 4.18 | 252.21 | 0.84 | 0.57
REPA-XL/2 [52] | 2.08 | 4.19 | 274.6 | 0.83 | 0.58
FlowDCN-XL/2 [48] | 2.44 | 4.53 | 252.8 | 0.84 | 0.54
DDT-XL/2 (500K) | 1.28 | 4.22 | 305.1 | 0.80 | 0.63
", + "bbox": [ + 99, + 88, + 475, + 268 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "performance margin. In Tab. 3, some metrics exhibit subtle degradation, we attribute this to potentially insufficient fine-tuning. When allocating more training iterations to DDT-XL/2, it achieves 1.28 FID at 500K steps with CFG3.0 within the time interval [0.3, 1.0].", + "bbox": [ + 89, + 353, + 483, + 431 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.4. Acceleration by Encoder sharing", + "text_level": 1, + "bbox": [ + 89, + 441, + 380, + 458 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As illustrated in Fig. 5, there is a strong local consistency of the self-condition in our condition encoder. Even $\\boldsymbol{z}_{t=0}$ has a strong similarity above 0.8 with $\\boldsymbol{z}_{t=1}$ . This consistency provides an opportunity to speed up inference by sharing the encoder between adjacent steps.", + "bbox": [ + 89, + 464, + 483, + 541 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We employed the simple uniform encoder sharing strategy and the new novel statistics dynamic programming strategy. Specifically, for the uniform strategy, we only recalculate the self-condition $z_{t}$ every $K$ steps. For statistics dynamic programming, we solve the aforementioned minimal sum path on the similarity matrix by dynamic programming and recalculate $z_{t}$ according to the solved strategy. As shown in Fig. 6, there is a significant inference speedup nearly without visual quality loss when $K$ is smaller than 6. As shown in Tab. 4, the metrics loss is still marginal, while the inference speedup is significant. The novel statistics dynamic programming slightly outperformed the naive uniform strategy with less FID drop.", + "bbox": [ + 89, + 541, + 483, + 737 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.5. Ablations", + "text_level": 1, + "bbox": [ + 89, + 748, + 202, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct ablation studies on ImageNet $256 \\times 256$ with DDT-B/2 and DDT-L/2. For sampling, we take the Euler solver with 250 steps as the default choice without classifier-free guidance. For training, we train each model with 80 epochs(400k steps), and the batch size is set to 256.", + "bbox": [ + 89, + 771, + 482, + 848 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Encoder-Decoder Ratio we systematically explored ratios ranging from $2:1$ to $5:1$ across different model sizes.", + "bbox": [ + 89, + 869, + 483, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/6aeb8ad5d07d128fe9a1397a63a04c9474c9d30a1f1f63a493b7426b0b7ca35a.jpg", + "table_caption": [ + "Table 3. Benchmarking class-conditional image generation on ImageNet $512 \\times 512$ . Our DDT-XL/2(512 × 512) is fine-tuned from the same model trained on $256 \\times 256$ resolution setting of 1.28M steps. We adopt the interval guidance with interval [0.3, 1] and CFG of 3.0" + ], + "table_footnote": [], + "table_body": "
Sharing Ratio | Acc. | Φ | FID↓ | sFID↓ | IS↑ | Prec.↑ | Rec.↑
0.00 | 1.0× | Uniform | 1.31 | 4.62 | 308.1 | 0.78 | 0.66
0.50 | 1.6× | Uniform | 1.31 | 4.48 | 300.5 | 0.78 | 0.65
0.66 | 1.9× | Uniform | 1.32 | 4.46 | 301.2 | 0.78 | 0.65
0.75 | 2.3× | Uniform | 1.34 | 4.43 | 302.7 | 0.78 | 0.65
0.80 | 2.6× | Uniform | 1.36 | 4.40 | 303.3 | 0.78 | 0.64
0.80 | 2.6× | StatisticDP | 1.33 | 4.37 | 301.7 | 0.78 | 0.64
0.83 | 2.7× | Uniform | 1.37 | 4.41 | 302.8 | 0.78 | 0.64
0.83 | 2.7× | StatisticDP | 1.36 | 4.35 | 300.3 | 0.78 | 0.64
0.87 | 3.0× | Uniform | 1.42 | 4.43 | 302.8 | 0.78 | 0.64
0.87 | 3.0× | StatisticDP | 1.40 | 4.35 | 302.4 | 0.78 | 0.64
", + "bbox": [ + 517, + 88, + 903, + 277 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4. Metrics of ${400K}$ training steps with different model sizes. All results are reported without classifier-free guidance. gray means metrics are copied from the original paper, otherwise it is produced by our codebase. Our DDT models consistently outperformed its counterparts", + "bbox": [ + 511, + 286, + 906, + 357 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/033d63fcd85c9681fb31fefd5873fed342d5bf14138f45e31589702949bc1243.jpg", + "image_caption": [ + "Figure 5. The cosine similarity of self-condition feature $z_{t}$ from encoder between different timesteps. There is a strong correlation between adjacent steps, indicating the redundancy." + ], + "image_footnote": [], + "bbox": [ + 527, + 366, + 908, + 604 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ee4c2612ac81285275068e95259e898b96d23d847186f0bc23ef8c53d733f70c.jpg", + "image_caption": [ + "Figure 6. Sharing the self-condition $z_{t}$ in adjacent steps significant speedup the inference. We tried various sharing frequency configurations. There is marginal visual quality down-gradation when the sharing frequency is reasonable." + ], + "image_footnote": [], + "bbox": [ + 522, + 666, + 900, + 795 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "in Fig. 7 and Fig. 8. Our notation $m\\mathrm{En}n\\mathrm{De}$ represents models with $m$ encoder layers and $n$ decoder layers. The inves", + "bbox": [ + 511, + 869, + 906, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/407f5b5ff8f50c1416dcfbeafdd2fea4269a384cafb24e536a03e1635a638c3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 106, + 90, + 362, + 242 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2757235a519a66a5a620a7b238d4836051bbd2024c54007acd28fc92d3a5a40d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 90, + 630, + 242 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4b8104cb4cd6afc445f9075381ba25c807552c05b2bdf48c9ed38b7d6ca8600f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 97, + 895, + 242 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/444038e1ba194d39dcc047006d8249c0659c38455fcbbb140ca296cf75f66b58.jpg", + "image_caption": [ + "Figure 7. The DDT-B/2 built upon Improved-baselines under various Encoder and Decoder layer ratio. DDT-B/2(8En4De) achieves much faster convergence speed and better performance.", + "Figure 8. The DDT-L/2 built upon Improved-baselines under various Encoder and Decoder layer ratio. DDT-L/2 prefers an unexpected aggressive encoder-decoder ratio DDT-L/2(20En4De) achieves much faster convergence speed and better performance." 
+ ], + "image_footnote": [], + "bbox": [ + 106, + 282, + 362, + 431 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ead4974e709c35db97c6cbfb7ffa7b61e5d9ea3ee2de853402c30d478dacb855.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 284, + 629, + 431 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0107bf7a532ff9c9a3c73a46031f8f1ad26badac4b3d1c14d2d9de1f95c1568a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 287, + 895, + 431 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "tigation experiments in Fig. 7 and Fig. 8 revealed critical insights into architectural optimization. We observed that a larger encoder is beneficial for further improving the performance as the model size increases. For the Base model in Fig. 7, the optimal configuration emerged as 8 encoder layers and 4 decoder layers, delivering superior performance and convergence speed. Notably, the Large model in Fig. 8 exhibited a distinct preference, achieving peak performance with 20 encoder layers and 4 decoder layers, an unexpectedly aggressive encoder-decoder ratio. This unexpected discovery motivates us to scale the layer ratio in DDT-XL/2 to 22 encoder layers and 6 decoders to explore the performance upper limits of diffusion transformers.", + "bbox": [ + 89, + 482, + 482, + 678 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Decoder Block types. In our investigation of decoder block types and their impact on high-frequency decoding performance, we systematically evaluated multiple architectural configurations. Our comprehensive assessment included alternative approaches such as simple $3 \\times 3$ convolution blocks and naive MLP blocks. As shown in Tab. 5, the default (Attention with the MLP) setting achieves better results. Thanks to the encoder-decoder design, naive Conv blocks even achieve comparable results.", + "bbox": [ + 89, + 696, + 482, + 833 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 89, + 847, + 209, + 862 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we have introduced a novel Decoupled Diffusion Transformer, which rethinks the optimization dilemma", + "bbox": [ + 89, + 871, + 482, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/d644e55597e5568b650ac9c7f700087e5b4a931d95fdf3b462fa03c854c68e3b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Decoder Block | FID↓ | sFID↓ | IS↑ | Prec.↑ | Rec.↑
Conv+MLP | 16.96 | 7.33 | 85.1 | 0.62 | 0.65
MLP+MLP | 24.13 | 7.89 | 65.0 | 0.57 | 0.65
Attn+MLP | 16.32 | 6.63 | 86.0 | 0.62 | 0.66
", + "bbox": [ + 553, + 479, + 867, + 551 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5. Metrics of $400K$ training steps on DDT-B/2(8En4De) with different decoder blocks. All results are reported without classifier-free guidance. The Default Attention + MLP configuration achieves best performance.", + "bbox": [ + 511, + 561, + 906, + 617 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "of the traditional diffusion transformer. By decoupling the low-frequency encoding and high-frequency decoding into dedicated components, we effectively resolved the optimization dilemma that has constrained diffusion transformer. Furthermore, we discovered that increasing the encoder capacity relative to the decoder yields increasingly beneficial results as the overall model scale grows. This insight provides valuable guidance for future model scaling efforts. Our experiments demonstrate that our DDT-XL/2 (22En6De) with an unexpected aggressive encoder-decoder layer ratio achieves great performance while requiring only 256 training epochs. This significant improvement in efficiency addresses one of the primary limitations of diffusion models: their lengthy training requirements. The decoupled architecture also presents opportunities for inference optimization through our proposed encoder result sharing mechanism. Our statistical dynamic programming approach for determining optimal sharing strategies enables faster inference while minimizing quality", + "bbox": [ + 511, + 636, + 906, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "degradation, demonstrating that architectural innovations can yield benefits beyond their primary design objectives.", + "bbox": [ + 89, + 90, + 485, + 122 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 146, + 187, + 161 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Niket Agarwal, Arslan Ali, Maciej Bala, Yogesh Balaji, Erik Barker, Tiffany Cai, Prithvijit Chattopadhyay, Yongxin Chen, Yin Cui, Yifan Ding, et al. Cosmos world foundation model platform for physical ai. arXiv preprint arXiv:2501.03575, 2025. 2", + "[2] Fan Bao, Shen Nie, Kaiwen Xue, Yue Cao, Chongxuan Li, Hang Su, and Jun Zhu. All are worth words: A vit backbone for diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22669-22679, 2023. 2, 6", + "[3] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale gan training for high fidelity natural image synthesis. arXiv preprint arXiv:1809.11096, 2018. 1, 7", + "[4] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2", + "[5] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 1", + "[6] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart- $\\cdot$ alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 
2", + "[7] Junsong Chen, Chongjian Ge, Enze Xie, Yue Wu, Lewei Yao, Xiaozhe Ren, Zhongdao Wang, Ping Luo, Huchuan Lu, and Zhenguo Li. Pixart- $\\backslash$ sigma: Weak-to-strong training of diffusion transformer for 4k text-to-image generation. arXiv preprint arXiv:2403.04692, 2024. 2", + "[8] Xiangxiang Chu, Jianlin Su, Bo Zhang, and Chunhua Shen. Visionllama: A unified llama interface for vision tasks. arXiv preprint arXiv:2403.00522, 2024. 5", + "[9] Chaorui Deng, Deyao Zh, Kunchang Li, Shi Guan, and Haoqi Fan. Causal diffusion transformers for generative modeling. arXiv preprint arXiv:2412.12095, 2024. 6, 12", + "[10] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 2, 6, 7, 11", + "[11] Sander Dieleman. Diffusion is spectral autoregression, 2024. 3, 4", + "[12] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. arXiv preprint arXiv:2403.03206, 2024. 2, 5", + "[13] Zhengcong Fei, Mingyuan Fan, Changqian Yu, Debang Li, and Junshi Huang. Diffusion-rwkv: Scaling rwkv-like architectures for diffusion models. arXiv preprint arXiv:2404.04478, 2024. 2" + ], + "bbox": [ + 93, + 172, + 482, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 23164-23173, 2023. 3, 6", + "[15] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23164-23173, 2023. 3", + "[16] Tiankai Hang, Shuyang Gu, Chen Li, Jianmin Bao, Dong Chen, Han Hu, Xin Geng, and Baining Guo. Efficient diffusion training via min-snr weighting strategy. In Proceedings of the IEEE/CVF international conference on computer vision, pages 7441-7451, 2023. 2, 6", + "[17] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 2", + "[18] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5", + "[19] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 1", + "[20] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 2", + "[21] Tero Karras, Miika Aittala, Timo Aila, and Samuli Laine. Elucidating the design space of diffusion-based generative models. Advances in Neural Information Processing Systems, 35:26565-26577, 2022. 1", + "[22] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 
11", + "[23] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023. 2", + "[24] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 2", + "[25] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 5", + "[26] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 6" + ], + "bbox": [ + 516, + 92, + 903, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Tianhong Li, Dina Katabi, and Kaiming He. Return of unconditional generation: A self-supervised representation generation method. Advances in Neural Information Processing Systems, 37:125441-125468, 2024. 3", + "[28] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. Advances in Neural Information Processing Systems, 37:56424-56445, 2025. 2, 4, 6", + "[29] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022. 1, 3", + "[30] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 1, 3", + "[31] Zeyu Lu, Zidong Wang, Di Huang, Chengyue Wu, Xihui Liu, Wanli Ouyang, and Lei Bai. Fit: Flexible vision transformer for diffusion model. arXiv preprint arXiv:2402.12376, 2024.5", + "[32] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. arXiv preprint arXiv:2401.08740, 2024. 2, 3, 5, 6, 7", + "[33] Xinyin Ma, Gongfan Fang, and Xinchao Wang. Deepcache: Accelerating diffusion models for free. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15762-15772, 2024. 5", + "[34] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter W Battaglia. Generating images with sparse representations. arXiv preprint arXiv:2103.03841, 2021. 5", + "[35] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 4", + "[36] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 2, 4, 5, 6, 7", + "[37] Severi Rissanen, Markus Heinonen, and Arno Solin. Generative modelling with inverse heat dissipation. arXiv preprint arXiv:2206.13397, 2022. 
3, 4", + "[38] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 6", + "[39] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 5", + "[40] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 conference proceedings, pages 1-10, 2022. 1, 7", + "[41] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based" + ], + "bbox": [ + 91, + 90, + 485, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020. 1", + "[42] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 5", + "[43] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 1", + "[44] Jiayan Teng, Wendi Zheng, Ming Ding, Wenyi Hong, Jianqiao Wangni, Zhuoyi Yang, and Jie Tang. Relay diffusion: Unifying diffusion process across resolutions for image synthesis. arXiv preprint arXiv:2309.03350, 2023. 6", + "[45] Yao Teng, Yue Wu, Han Shi, Xuefei Ning, Guohao Dai, Yu Wang, Zhenguo Li, and Xihui Liu. Dim: Diffusion mamba for efficient high-resolution image synthesis. arXiv preprint arXiv:2405.14224, 2024. 2", + "[46] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 5", + "[47] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 5", + "[48] Shuai Wang, Zexian Li, Tianhui Song, Xubin Li, Tiezheng Ge, Bo Zheng, and Limin Wang. Flowdcn: Exploring dcn-like architectures for fast image generation with arbitrary resolution. arXiv preprint arXiv:2410.22655, 2024. 2, 6, 7", + "[49] Jing Nathan Yan, Jiatao Gu, and Alexander M Rush. Diffusion models without attention. arXiv preprint arXiv:2311.18257, 2023. 2", + "[50] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 5, 6", + "[51] Qihang Yu, Ju He, Xueqing Deng, Xiaohui Shen, and Liang-Chieh Chen. Randomized autoregressive visual generation. arXiv preprint arXiv:2411.00776, 2024. 1", + "[52] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 2, 3, 4, 5, 6, 7, 12", + "[53] Xiaoyu Yue, Zidong Wang, Zeyu Lu, Shuyang Sun, Meng Wei, Wanli Ouyang, Lei Bai, and Luping Zhou. Diffusion models need visual priors for image generation. 
arXiv preprint arXiv:2410.08531, 2024. 2, 3", + "[54] Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Lirui Zhao, Fu-Yun Wang, Zhanyu Ma, et al. Lumina-last: Making lumina-t2x stronger and faster with next-dit. arXiv preprint arXiv:2406.18583, 2024. 2" + ], + "bbox": [ + 516, + 92, + 903, + 868 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "A. Model Specs", + "text_level": 1, + "bbox": [ + 91, + 89, + 225, + 107 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/ba57dee1c812d45caeb0fdacc9dbbc045f6fbdb63a51e0e248e9d6101fefe26e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Config | #Layers | Hidden dim | #Heads
B/2 | 12 | 768 | 12
L/2 | 24 | 1024 | 16
XL/2 | 28 | 1152 | 16
", + "bbox": [ + 133, + 126, + 441, + 203 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "B. Hyper-parameters", + "text_level": 1, + "bbox": [ + 91, + 241, + 274, + 258 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/72b79304d4617c22a6598c0b47f0a98af50e4c7399f5c40fbd5541f405d454d1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
VAE | SD-VAE-f8d4-ft-ema
VAE downsample | 8
latent channel | 4
optimizer | AdamW [22]
base learning rate | 1e-4
weight decay | 0.0
batch size | 256
learning rate schedule | constant
augmentation | center crop
diffusion sampler | Euler-ODE
diffusion steps | 250
evaluation suite | ADM [10]
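To make the hyper-parameter table above concrete, here is a minimal PyTorch sketch of the optimization setup (AdamW, constant learning rate 1e-4, no weight decay, global batch size 256); the placeholder model is our own stand-in, not the paper's released training code:

```python
import torch

# Minimal sketch of the optimization settings from the table above.
# The Linear module is a stand-in; the real DDT network is not shown here.
model = torch.nn.Linear(4, 4)  # placeholder for the DDT network
optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=1e-4,           # base learning rate, constant schedule (no warm-up)
    weight_decay=0.0,  # weight decay disabled
)
global_batch_size = 256  # total batch size across all GPUs
```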
", + "bbox": [ + 122, + 277, + 450, + 483 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "C. Linear flow and Diffusion", + "text_level": 1, + "bbox": [ + 89, + 506, + 334, + 522 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Given the SDE forward and reverse process:", + "bbox": [ + 89, + 531, + 383, + 547 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nd \\boldsymbol {x} _ {t} = f (t) \\boldsymbol {x} _ {t} \\mathrm {d} t + g (t) \\mathrm {d} \\boldsymbol {w} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 107, + 553, + 480, + 570 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nd \\boldsymbol {x} _ {t} = [ f (t) \\boldsymbol {x} _ {t} - g (t) ^ {2} \\nabla_ {\\boldsymbol {x}} \\log p (\\boldsymbol {x} _ {t}) ] d t + g (t) d \\boldsymbol {w} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 107, + 573, + 480, + 590 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A corresponding deterministic process exists with trajectories sharing the same marginal probability densities of reverse SDE.", + "bbox": [ + 89, + 597, + 482, + 641 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nd \\boldsymbol {x} _ {t} = [ f (t) \\boldsymbol {x} _ {t} - \\frac {1}{2} g (t) ^ {2} \\nabla_ {\\boldsymbol {x} _ {t}} \\log p (\\boldsymbol {x} _ {t}) ] d t \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 147, + 643, + 482, + 672 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Given $x_{t} = \\alpha_{t}x_{data} + \\sigma \\epsilon$ . The traditional diffusion model learns:", + "bbox": [ + 89, + 678, + 482, + 705 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\boldsymbol {x} _ {t}} \\log p (\\boldsymbol {x} _ {t}) = - \\frac {\\epsilon}{\\sigma (t)} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 704, + 480, + 733 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The flow-matching framework actually learns the following:", + "bbox": [ + 89, + 734, + 482, + 763 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\boldsymbol {v} _ {t} = \\dot {\\alpha} x + \\dot {\\sigma} \\epsilon (13) \\\\ = x - \\epsilon (14) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 772, + 480, + 805 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Here we will demonstrate in flow-matching, the $\\boldsymbol{v}_t$ prediction is actually as same as the reverse ode:", + "bbox": [ + 89, + 813, + 480, + 842 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\dot {\\alpha} x + \\dot {\\sigma} \\epsilon (15) \\\\ = f (t) \\boldsymbol {x} _ {t} - \\frac {1}{2} g (t) ^ {2} \\nabla_ {\\boldsymbol {x} _ {t}} \\log p (\\boldsymbol {x} _ {t}) (16) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 849, + 480, + 898 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Let us start by expanding the reverse ode first.", + "bbox": [ + 532, + 90, + 836, + 106 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} f (t) \\boldsymbol {x} _ {t} - \\frac {1}{2} g (t) ^ {2} \\nabla_ {\\boldsymbol {x} _ {t}} \\log p (\\boldsymbol {x} _ {t}) (17) \\\\ = f (t) (\\alpha (t) \\boldsymbol {x} _ {\\text {d a t a}} + \\sigma (t) \\epsilon) - \\frac {1}{2} g (t) ^ {2} \\left[ - \\frac {\\epsilon}{\\sigma (t)} \\right] (18) \\\\ = f (t) \\alpha (t) \\boldsymbol {x} _ {\\text {d a t a}} + (f (t) \\sigma 
+ { + "type": "text", + "text": "To prove Eq. (16), we need to demonstrate that:", + "bbox": [ + 532, + 242, + 854, + 257 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\dot {\\alpha} (t) = f _ {t} \\alpha (t) \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 624, + 276, + 903, + 292 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\dot {\\sigma} (t) = f _ {t} \\sigma (t) + \\frac {1}{2} \\frac {g _ {t} ^ {2}}{\\sigma (t)}. \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 625, + 296, + 903, + 329 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Here, let us derive the relation between $f_{t}$ and $\\alpha(t)$ , $\\dot{\\alpha}(t)$ . We denote $x_{data}(t) = \\alpha(t)x_{data}$ as the remaining component of $x_{data}$ in $x_{t}$ ; it is easy to find that:", + "bbox": [ + 511, + 351, + 903, + 396 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nd \\boldsymbol {x} _ {\\text {data}} (t) = f _ {t} \\boldsymbol {x} _ {\\text {data}} (t) d t \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 627, + 416, + 903, + 431 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nd \\left(\\alpha (t) x _ {data}\\right) = f _ {t} \\alpha (t) x _ {data} d t \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 434, + 903, + 450 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nd \\alpha (t) = f _ {t} \\alpha (t) d t \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 656, + 454, + 903, + 469 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "So, Eq. (20) is right.", + "bbox": [ + 513, + 489, + 651, + 503 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Based on the above equation, we will demonstrate the relation of $g_{t}, f_{t}$ with $\\sigma(t)$ . Note that Gaussian noise has nice additive properties:", + "bbox": [ + 511, + 507, + 905, + 553 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\na \\epsilon_ {1} + b \\epsilon_ {2} \\sim \\mathcal {N} \\left(0, \\sqrt {a ^ {2} + b ^ {2}}\\right) \\tag {25}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 571, + 903, + 590 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Let us start with the Gaussian noise component $\\epsilon(t)$ : reaching timestep $t$ , every noise increment added at $s \\in [0, t]$ has been decayed by a factor of $\\frac{\\alpha(t)}{\\alpha(s)}$. 
Thus, the mixed Gaussian noise will have a standard deviation $\sigma(t)$ of:", + "bbox": [ + 511, + 609, + 905, + 674 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma (t) = \\sqrt {\\int_ {0} ^ {t} \\left(\\frac {\\alpha (t)}{\\alpha (s)}\\right) ^ {2} g _ {s} ^ {2} \\, d s} \\tag {26}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 693, + 903, + 734 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma (t) = \\alpha (t) \\sqrt {\\int_ {0} ^ {t} \\left(\\frac {g _ {s}}{\\alpha (s)}\\right) ^ {2} d s} \\tag {27}\n$$\n", + "text_format": "latex", + "bbox": [ + 601, + 737, + 903, + 777 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "After obtaining the relation of $f_{t}, g_{t}$ and $\\alpha(t), \\sigma(t)$ , we derive $\\dot{\\alpha}(t)$ and $\\dot{\\sigma}(t)$ under the above conditions. Integrating Eq. (24) gives $\\alpha(t) = \\exp[\\int_{0}^{t} f_{s} d s]$ , hence:", + "bbox": [ + 511, + 794, + 903, + 825 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\dot {\\alpha} (t) = f _ {t} \\exp \\left[ \\int_ {0} ^ {t} f _ {s} d s \\right] \\tag {28}\n$$\n", + "text_format": "latex", + "bbox": [ + 627, + 844, + 903, + 878 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\dot {\\alpha} (t) = f _ {t} \\alpha (t) \\tag {29}\n$$\n", + "text_format": "latex", + "bbox": [ + 629, + 880, + 903, + 897 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As for $\\dot{\\sigma} (t)$ , the derivation is more involved but not hard:", + "bbox": [ + 89, + 90, + 379, + 106 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\dot {\\sigma} (t) = \\dot {\\alpha} (t) \\sqrt {\\int_ {0} ^ {t} \\left(\\frac {g _ {s}}{\\alpha (s)}\\right) ^ {2} d s} + \\alpha (t) \\frac {\\frac {1}{2} \\frac {g _ {t} ^ {2}}{\\alpha ^ {2} (t)}}{\\sqrt {\\int_ {0} ^ {t} \\left(\\frac {g _ {s}}{\\alpha (s)}\\right) ^ {2} d s}} \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 112, + 493, + 176 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\dot {\\sigma} (t) = f _ {t} \\alpha (t) \\sqrt {\\int_ {0} ^ {t} \\left(\\frac {g _ {s}}{\\alpha (s)}\\right) ^ {2} d s} + \\alpha (t) \\frac {\\frac {1}{2} \\frac {g _ {t} ^ {2}}{\\alpha ^ {2} (t)}}{\\sqrt {\\int_ {0} ^ {t} \\left(\\frac {g _ {s}}{\\alpha (s)}\\right) ^ {2} d s}} \\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 181, + 503, + 246 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\dot {\\sigma} (t) = f _ {t} \\alpha (t) \\sqrt {\\int_ {0} ^ {t} \\left(\\frac {g _ {s}}{\\alpha (s)}\\right) ^ {2} d s} + \\frac {\\frac {1}{2} g _ {t} ^ {2}}{\\alpha (t) \\sqrt {\\int_ {0} ^ {t} \\left(\\frac {g _ {s}}{\\alpha (s)}\\right) ^ {2} d s}} \\tag {32}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 250, + 490, + 314 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\dot {\\sigma} (t) = f _ {t} \\sigma (t) + \\frac {1}{2} \\frac {g _ {t} ^ {2}}{\\sigma (t)} \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 316, + 483, + 349 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "So, Eq. (21) is right.", + "bbox": [ + 89, + 353, + 227, + 369 + ], + "page_idx": 11 + },
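As a sanity check (our own verification sketch, not part of the original appendix), the relations in Eqs. (20) and (21) can be confirmed symbolically from the definitions $\alpha(t) = \exp[\int_0^t f_s ds]$ and Eq. (27):

```python
import sympy as sp

t, s, u = sp.symbols('t s u', positive=True)
f, g = sp.Function('f'), sp.Function('g')

def alpha(x):
    # Integrating Eq. (24): alpha(x) = exp(int_0^x f(u) du)
    return sp.exp(sp.Integral(f(u), (u, 0, x)))

# Eq. (27): sigma(t) = alpha(t) * sqrt(int_0^t (g(s) / alpha(s))^2 ds)
sigma = alpha(t) * sp.sqrt(sp.Integral((g(s) / alpha(s)) ** 2, (s, 0, t)))

# Eq. (20): alpha'(t) - f(t) * alpha(t) should vanish
print(sp.simplify(sp.diff(alpha(t), t) - f(t) * alpha(t)))  # expect 0
# Eq. (21): sigma'(t) - f(t)*sigma(t) - g(t)^2 / (2*sigma(t)) should vanish
print(sp.simplify(sp.diff(sigma, t) - f(t) * sigma - g(t) ** 2 / (2 * sigma)))  # expect 0
```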
+ { + "type": "text", + "text": "D. Proof of Spectral Autoregression", + "text_level": 1, + "bbox": [ + 89, + 380, + 401, + 398 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Given the noise scheduler $\\{\\alpha_{t},\\sigma_{t}\\}$ , the clean data $\\pmb{x}_{\\mathrm{data}}$ , and Gaussian noise $\\epsilon$ , denote $K_{freq}$ as the maximum frequency of the clean data $\\pmb{x}_{\\mathrm{data}}$ . The noisy latent $x_{t}$ at timestep $t$ has been defined as:", + "bbox": [ + 89, + 405, + 483, + 465 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} _ {t} = \\alpha_ {t} \\boldsymbol {x} _ {\\text {data}} + \\sigma_ {t} \\boldsymbol {\\epsilon} \\tag {34}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 474, + 480, + 489 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The spectrum magnitude $c_{i}$ of $x_{t}$ on the DCT basis $\\mathbf{u}_{i}$ follows:", + "bbox": [ + 89, + 494, + 483, + 523 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {c} _ {i} = \\mathbb {E} _ {\\epsilon} \\left[ \\boldsymbol {u} _ {i} ^ {T} \\boldsymbol {x} _ {t} \\right] ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 183, + 530, + 295, + 549 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {c} _ {i} = \\mathbb {E} _ {\\epsilon} [ \\boldsymbol {u} _ {i} ^ {T} (\\alpha_ {t} \\boldsymbol {x} _ {data} + \\sigma_ {t} \\boldsymbol {\\epsilon}) ] ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 551, + 388, + 570 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Recall that the spectrum magnitude of Gaussian noise $\\epsilon$ is uniformly distributed:", + "bbox": [ + 89, + 575, + 483, + 606 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\pmb {c} _ {i} = [ \\alpha_ {t} \\pmb {u} _ {i} ^ {T} \\pmb {x} _ {data} ] ^ {2} + 2 \\alpha_ {t} \\sigma_ {t} \\mathbb {E} _ {\\epsilon} [ \\pmb {u} _ {i} ^ {T} \\pmb {x} _ {data} \\pmb {u} _ {i} ^ {T} \\epsilon ] + \\sigma_ {t} ^ {2} \\mathbb {E} _ {\\epsilon} [ \\pmb {u} _ {i} ^ {T} \\epsilon ] ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 625, + 493, + 645 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {c} _ {i} = \\left[ \\alpha_ {t} \\boldsymbol {u} _ {i} ^ {T} \\boldsymbol {x} _ {\\text {data}} \\right] ^ {2} + \\sigma_ {t} ^ {2} \\mathbb {E} _ {\\boldsymbol {\\epsilon}} \\left[ \\boldsymbol {u} _ {i} ^ {T} \\boldsymbol {\\epsilon} \\right] ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 646, + 321, + 665 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {c} _ {i} = \\alpha_ {t} ^ {2} \\left[ \\boldsymbol {u} _ {i} ^ {T} \\boldsymbol {x} _ {\\text {data}} \\right] ^ {2} + \\sigma_ {t} ^ {2} \\lambda\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 667, + 272, + 686 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "If $\\sigma_t^2\\lambda$ is larger than $[\\alpha_t\\pmb{u}_i^T\\pmb{x}_{data}]^2$ , the spectrum magnitude $\\pmb{c}_i$ on the DCT basis $\\pmb{u}_i$ is drowned out by the noise; thus the maximum remaining frequency $f_{max}(t)$ of the original data in $\\pmb{x}_t$ follows:", + "bbox": [ + 89, + 691, + 483, + 753 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nf _ {\\max } (t) > \\min \\left(\\left(\\frac {\\alpha_ {t} \\boldsymbol {u} _ {i} ^ {T} \\boldsymbol {x} _ {\\text {data}}}{\\sigma_ {t} \\lambda}\\right) ^ {2}, K _ {\\text {freq}}\\right) \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 125, + 758, + 483, + 800 + ], + "page_idx": 11 + },
+ { + "type": "text", + "text": "Although $\\left(\\frac{\\alpha_t\\pmb{u}_i^T\\pmb{x}_{data}}{\\sigma_t\\lambda}\\right)^2$ depends on the dataset, we directly treat it as a constant 1 here and substitute $\\alpha = t$ and $\\sigma = 1 - t$ into the above equation; the resulting threshold $\\left(\\frac{t}{1 - t}\\right)^2$ evaluates to $1$ at $t = 0.5$ , $9$ at $t = 0.75$ , and $81$ at $t = 0.9$ , so high frequencies only survive near the clean-data end of the trajectory:", + "bbox": [ + 89, + 806, + 483, + 858 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nf _ {\\max } (t) > \\min \\left(\\left(\\frac {t}{1 - t}\\right) ^ {2}, K _ {freq}\\right) \\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 863, + 483, + 905 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "E. Linear multistep method", + "text_level": 1, + "bbox": [ + 513, + 90, + 758, + 107 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We conduct a targeted experiment on SiT-XL/2 with an Adams-Bashforth-like linear multistep solver. To clarify, we did not employ this powerful solver for our DDT models in any table of the main paper.", + "bbox": [ + 511, + 114, + 905, + 175 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The reverse ODE of diffusion models tackles the following integral:", + "bbox": [ + 511, + 175, + 905, + 205 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} _ {i + 1} = \\boldsymbol {x} _ {i} + \\int_ {t _ {i}} ^ {t _ {i + 1}} \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {t}, t) d t \\tag {37}\n$$\n", + "text_format": "latex", + "bbox": [ + 601, + 215, + 905, + 251 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The classic Euler method employs $\\pmb{v}_{\\theta}(\\pmb{x}_i, t_i)$ as an estimate of $\\pmb{v}_{\\theta}(\\pmb{x}_t, t)$ throughout the interval $[t_i, t_{i+1}]$ :", + "bbox": [ + 511, + 262, + 905, + 294 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} _ {i + 1} = \\boldsymbol {x} _ {i} + \\left(t _ {i + 1} - t _ {i}\\right) \\boldsymbol {v} _ {\\theta} \\left(\\boldsymbol {x} _ {i}, t _ {i}\\right). \\tag {38}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 305, + 903, + 321 + ], + "page_idx": 11 + },
+ { + "type": "text", + "text": "The classic multistep Adams-Bashforth solver (referred to as Adams for brevity) incorporates the Lagrange polynomial to improve the estimation accuracy using previous predictions.", + "bbox": [ + 511, + 333, + 905, + 393 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {t}, t) = \\sum_ {j = 0} ^ {i} (\\prod_ {k = 0, k \\neq j} ^ {i} \\frac {t - t _ {k}}{t _ {j} - t _ {k}}) \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {j}, t _ {j})\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 405, + 808, + 448 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} _ {i + 1} \\approx \\boldsymbol {x} _ {i} + \\int_ {t _ {i}} ^ {t _ {i + 1}} \\sum_ {j = 0} ^ {i} (\\prod_ {k = 0, k \\neq j} ^ {i} \\frac {t - t _ {k}}{t _ {j} - t _ {k}}) \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {j}, t _ {j}) d t\n$$\n", + "text_format": "latex", + "bbox": [ + 542, + 450, + 901, + 494 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} _ {i + 1} \\approx \\boldsymbol {x} _ {i} + \\sum_ {j = 0} ^ {i} \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {j}, t _ {j}) \\int_ {t _ {i}} ^ {t _ {i + 1}} \\left(\\prod_ {k = 0, k \\neq j} ^ {i} \\frac {t - t _ {k}}{t _ {j} - t _ {k}}\\right) d t\n$$\n", + "text_format": "latex", + "bbox": [ + 540, + 497, + 905, + 540 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Note that $\\int_{t_i}^{t_{i + 1}}\\left(\\prod_{k = 0,k\\neq j}^i\\frac{t - t_k}{t_j - t_k}\\right)dt$ of the Lagrange polynomial can be pre-integrated into a constant coefficient, so that only a naive weighted summation is required for ODE solving (see the sketch at the end of this appendix).", + "bbox": [ + 511, + 551, + 905, + 616 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "F. Classifier-free guidance.", + "text_level": 1, + "bbox": [ + 513, + 628, + 740, + 646 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Classifier-free guidance significantly impacts the performance of diffusion models. Traditional classifier-free guidance improves performance at the cost of decreased diversity. Interval guidance has recently been adopted by REPA [52] and CausalFusion [9]. It applies classifier-free guidance only to the high-frequency generation phase to preserve diversity. We sweep different classifier-free guidance strengths with selected intervals. Our DDT-XL/2 achieves the best performance with the interval [0.3, 1] and a classifier-free guidance strength of 2. Recall that we denote $t = 0$ as the pure noise timestep while REPA [52] uses $t = 1$ ; thus this exactly corresponds to the [0, 0.7] interval in REPA [52].", + "bbox": [ + 511, + 654, + 906, + 837 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d832ac8214ead6291814ec65b36289871ce8a3fe75d47d1a9b200cf3022d9f90.jpg", + "image_caption": [ + "Classifier-free guidance with intervals", + "Figure 9. FID10K of DDT-XL/2 with different classifier-free guidance strengths and guidance intervals. We sweep different classifier-free guidance strengths with selected intervals. Our DDT-XL/2 achieves the best performance with the interval [0.3, 1] and a classifier-free guidance strength of 2."
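The sketch referenced at the end of Appendix E above: a second-order Adams-Bashforth step with the Lagrange coefficients pre-integrated in closed form. This is our own illustration under assumed interfaces (the `v_theta` callable and the function name are not from the paper's code release):

```python
def adams2_sample(v_theta, x, timesteps):
    # Second-order Adams-Bashforth sampling sketch. v_theta(x, t) returns the
    # predicted velocity; timesteps increases from t=0 (noise) to t=1 (data).
    v_prev, t_prev = None, None
    for i in range(len(timesteps) - 1):
        t, t_next = timesteps[i], timesteps[i + 1]
        v = v_theta(x, t)
        h = t_next - t
        if v_prev is None:
            x = x + h * v  # first step falls back to Euler, Eq. (38)
        else:
            # Pre-integrated Lagrange coefficients over [t, t_next] for the
            # linear interpolant through (t_prev, v_prev) and (t, v).
            d = t - t_prev
            x = x + (h + h * h / (2 * d)) * v - (h * h / (2 * d)) * v_prev
        v_prev, t_prev = v, t
    return x
```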
+ ], + "image_footnote": [], + "bbox": [ + 96, + 351, + 475, + 577 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 12 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_model.json b/data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_model.json new file mode 100644 index 0000000000000000000000000000000000000000..1a00ba191c5e1240c2b424bef1b4420ad77a84de --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_model.json @@ -0,0 +1,3152 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.27, + 0.061, + 0.701 + ], + "angle": 270, + "content": "arXiv:2504.05741v2 [cs.CV] 9 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.295, + 0.131, + 0.703, + 0.153 + ], + "angle": 0, + "content": "DDT: Decoupled Diffusion Transformer" + }, + { + "type": "text", + "bbox": [ + 0.219, + 0.179, + 0.778, + 0.244 + ], + "angle": 0, + "content": "Shuai Wang1 Zhi Tian2 Weilin Huang2 Limin Wang1, * \n1Nanjing University 2ByteDance Seed Vision \nhttps://github.com/MCG-NJU/DDT" + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.267, + 0.353, + 0.463 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.102, + 0.469, + 0.322, + 0.48 + ], + "angle": 0, + "content": "(a) Our Decoupled Diffusion Transformer" + }, + { + "type": "image", + "bbox": [ + 0.361, + 0.268, + 0.568, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.369, + 0.469, + 0.579, + 0.48 + ], + "angle": 0, + "content": "(b) Conventional Diffusion Transformer" + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.27, + 0.895, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.61, + 0.469, + 0.858, + 0.48 + ], + "angle": 0, + "content": "(c) FID compared with Other Diffusion Models" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.485, + 0.907, + 0.515 + ], + "angle": 0, + "content": "Figure 1. Our decoupled diffusion transformer (DDT-XL/2) achieves a SoTA 1.31 FID under 256 epochs. Our decoupled diffusion transformer models incorporate a condition encoder to extract semantic self-conditions and a velocity decoder to decode velocity." + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.526, + 0.327, + 0.542 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.569, + 0.485, + 0.903 + ], + "angle": 0, + "content": "Diffusion transformers have demonstrated remarkable generation quality, albeit requiring longer training iterations and numerous inference steps. In each denoising step, diffusion transformers encode the noisy inputs to extract the lower-frequency semantic component and then decode the higher frequency with identical modules. This scheme creates an inherent optimization dilemma: encoding low-frequency semantics necessitates reducing high-frequency components, creating tension between semantic encoding and high-frequency decoding. To resolve this challenge, we propose a new Decoupled Diffusion Transformer (DDT), with a decoupled design of a dedicated condition encoder for semantic extraction alongside a specialized velocity decoder. Our experiments reveal that a more substantial encoder yields performance improvements as model size increases. 
For ImageNet \\(256 \\times 256\\), Our DDT-XL/2 achieves a new state-of-the-art performance of 1.31 FID (nearly \\(4 \\times\\) faster training convergence compared to previous diffusion transformers). For ImageNet \\(512 \\times 512\\), Our DDT-XL/2 achieves a new state-of-the-art FID of 1.28. Additionally, as a beneficial by-product, our decoupled architecture enhances inference speed by enabling the sharing self" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.528, + 0.907, + 0.589 + ], + "angle": 0, + "content": "condition between adjacent denoising steps. To minimize performance degradation, we propose a novel statistical dynamic programming approach to identify optimal sharing strategies." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.636, + 0.645, + 0.652 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.662, + 0.907, + 0.783 + ], + "angle": 0, + "content": "Image generation is a fundamental task in computer vision research, which aims at capturing the inherent data distribution of original image datasets and generating high-quality synthetic images through distribution sampling. Diffusion models [19, 21, 29, 30, 41] have recently emerged as highly promising solutions to learn the underlying data distribution in image generation, outperforming the GAN-based models [3, 40] and Auto-Regressive models [5, 43, 51]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.784, + 0.909, + 0.877 + ], + "angle": 0, + "content": "The diffusion forward process gradually adds Gaussian noise to the pristine data following an SDE forward schedule [19, 21, 41]. The denoising process learns the score estimation from this corruption process. Once the score function is accurately learned, data samples can be synthesized by numerically solving the reverse SDE [21, 29, 30, 41]." + }, + { + "type": "page_footnote", + "bbox": [ + 0.537, + 0.888, + 0.808, + 0.901 + ], + "angle": 0, + "content": ": Corresponding author (lmwang@nju.edu.cn)." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.212 + ], + "angle": 0, + "content": "Diffusion Transformers [32, 36] introduce the transformer architecture into diffusion models to replace the traditionally dominant UNet-based model [2, 10]. Empirical evidence suggests that, given sufficient training iterations, diffusion transformers outperform conventional approaches even without relying on long residual connections [36]. Nevertheless, their slow convergence rate still poses great challenge for developing new models due to the high cost." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.215, + 0.482, + 0.397 + ], + "angle": 0, + "content": "In this paper, we want to tackle the aforementioned major disadvantages from a model design perspective. Classic computer vision algorithms [4, 17, 23] strategically employ encoder-decoder architectures, prioritizing large encoders for rich feature extraction and lightweight decoders for efficient inference, while contemporary diffusion models predominantly rely on conventional decoder-only structures. We systematically investigate the underexplored potential of decoupled encoder-decoder designs in diffusion transformers, by answering the question of can decoupled encoder-decoder transformer unlock the capability of accelerated convergence and enhanced sample quality?" 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.398, + 0.483, + 0.671 + ], + "angle": 0, + "content": "Through investigation experiments, we conclude that the plain diffusion transformer has an optimization dilemma between abstract structure information extraction and detailed appearance information recovery. Further, the diffusion transformer is limited in extracting semantic representation due to the raw pixel supervision [28, 52, 53]. To address this issue, we propose a new architecture to explicitly decouple low-frequency semantic encoding and high-frequency detailed decoding through a customized encoder-decoder design. We call this encoder-decoder diffusion transformer model as DDT (Decoupled Diffusion Transformer). DDT incorporates a condition encoder to extract semantic self-condition features. The extracted self-condition is fed into a velocity decoder along with the noisy latent to regress the velocity field. To maintain the local consistency of self-condition features of adjacent steps, we employ direct supervision of representation alignment and indirect supervision from the velocity regression loss of the decoder." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.672, + 0.482, + 0.793 + ], + "angle": 0, + "content": "In the ImageNet \\(256 \\times 256\\) dataset, using the traditional off-shelf VAE [38], our decoupled diffusion transformer (DDT-XL/2) model achieves the state-of-the-art performance of 1.31 FID with interval guidance under only 256 epochs, approximately \\(4 \\times\\) training acceleration compared to REPA [52]. In the ImageNet \\(512 \\times 512\\) dataset, our DDT-XL/2 model achieves 1.28 FID within 500K finetuning steps." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.796, + 0.482, + 0.903 + ], + "angle": 0, + "content": "Furthermore, our DDT achieves strong local consistency on its self-condition feature from the encoder. This property can significantly boost the inference speed by sharing the self-condition between adjacent steps. We formulate the optimal encoder sharing strategy solving as a classic minimal sum path problem by minimizing the performance drop of sharing self-condition among adjacent steps. We propose" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.905, + 0.166 + ], + "angle": 0, + "content": "a statistic dynamic programming approach to find the optimal encoder sharing strategy with negligible second-level time cost. Compared with the naive uniform sharing, our dynamic programming delivers a minimal FID drop. Our contributions are summarized as follows." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.168, + 0.905, + 0.213 + ], + "angle": 0, + "content": "- We propose a new decoupled diffusion transformer model, which consists of a condition encoder and a velocity decoder." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.214, + 0.905, + 0.273 + ], + "angle": 0, + "content": "- We propose statistic dynamic programming to find the optimal self-condition sharing strategy to boost inference speed while keeping minimal performance downgradation." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.274, + 0.905, + 0.349 + ], + "angle": 0, + "content": "- In the ImageNet \\(256 \\times 256\\) dataset, using tradition SDf8d4 VAE, our decoupled diffusion transformer (DDT-XL/2) model achieves the SoTA 1.31 FID with interval guidance under only 256 epochs, approximately \\(4 \\times\\) training acceleration compared to REPA [52]." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.349, + 0.905, + 0.395 + ], + "angle": 0, + "content": "- In the ImageNet \\(512 \\times 512\\) dataset, our DDT-XL/2 model achieves the SoTA 1.28 FID, outperforming all previous methods with a significant margin." + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.168, + 0.905, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.412, + 0.655, + 0.427 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.439, + 0.907, + 0.711 + ], + "angle": 0, + "content": "Diffusion Transformers. The pioneering work of DiT [36] introduced transformers into diffusion models to replace the traditionally dominant UNet architecture [2, 10]. Empirical evidence demonstrates that given sufficient training iterations, diffusion transformers outperform conventional approaches even without relying on long residual connections. SiT [32] further validated the transformer architecture with linear flow diffusion. Following the simplicity and scalability of the diffusion transformer [32, 36], SD3 [12], Lumina [54], and PixArt [6, 7] introduced the diffusion transformer to more advanced text-to-image areas. Moreover, recently, diffusion transformers have dominated the text-to-video area with substantiated visual and motion quality [1, 20, 24]. Our decoupled diffusion transformer (DDT) presents a new variant within the diffusion transformer family. It achieves faster convergence by decoupling the low-frequency encoding and the high-frequency decoding." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Fast Diffusion Training. To accelerate the training efficiency of diffusion transformers, recent advances have pursued multi-faceted optimizations. Operator-centric approaches [13, 45, 48, 49] leverage efficient attention mechanisms: linear-attention variants [13, 45, 49] reduced quadratic complexity to speed up training, while sparse-attention architectures [48] prioritized sparsely relevant token interactions. Resampling approaches [12, 16] proposed lognorm sampling [12] or loss reweighting [16] techniques to stabilize training dynamics. 
Representation learning enhancement approaches integrate external inductive biases:" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.091, + 0.297, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.297, + 0.092, + 0.497, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.092, + 0.699, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.092, + 0.903, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.096, + 0.247, + 0.197, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.247, + 0.297, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.297, + 0.247, + 0.398, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.398, + 0.247, + 0.498, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.247, + 0.599, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.599, + 0.247, + 0.7, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.247, + 0.8, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.8, + 0.247, + 0.902, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.096, + 0.325, + 0.197, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.325, + 0.297, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.297, + 0.325, + 0.398, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.398, + 0.325, + 0.498, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.325, + 0.599, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.6, + 0.325, + 0.7, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.325, + 0.8, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.8, + 0.325, + 0.902, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.416, + 0.907, + 0.445 + ], + "angle": 0, + "content": "Figure 2. Selected \\(256 \\times 256\\) and \\(512 \\times 512\\) resolution samples. Generated from DDT-XL/2 trained on ImageNet \\(256 \\times 256\\) resolution and ImageNet \\(512 \\times 512\\) resolution with CFG = 4.0." + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.459, + 0.481, + 0.608 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.62, + 0.483, + 0.676 + ], + "angle": 0, + "content": "Figure 3. The reverse-SDE process (generation) of SiT-XL/2 in \\( x \\) space. There is a clear generation process from low frequency to high frequency. Most of the time is spent on generating high-frequency details (from \\( t = 0.4 \\) to \\( t = 1.0 \\))." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.692, + 0.483, + 0.784 + ], + "angle": 0, + "content": "REPA [52], RCG [27] and DoD [53] borrowed vision-specific priors into diffusion training, while masked modeling techniques [14, 15] strengthened spatial reasoning by enforcing structured feature completion during denoising. Collectively, these strategies address computational, sampling, and representational bottlenecks." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.799, + 0.292, + 0.817 + ], + "angle": 0, + "content": "3. Preliminary Analysis" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Linear-based flow matching [29, 30, 32] represents a specialized family of diffusion models that we focus on as our primary analytical subject due to its simplicity and efficiency. For the convenience of discussion, in certain situations, diffusion and flow-matching will be used interchange-" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.455, + 0.713, + 0.57 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.713, + 0.456, + 0.902, + 0.57 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.58, + 0.907, + 0.636 + ], + "angle": 0, + "content": "Figure 4. The FID50K metric of SiT-XL/2 for different timeshift values. We employ a 2-nd order Adams-like solver to collect the performance. Allocating more computation at noisy steps significantly improves the performance." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.646, + 0.906, + 0.676 + ], + "angle": 0, + "content": "ably. In this framework, \\( t = 0 \\) corresponds to the pure noise timestep." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.681, + 0.907, + 0.831 + ], + "angle": 0, + "content": "As illustrated in Fig. 3, diffusion models perform autoregressive refinement on spectral components [11, 37]. The diffusion transformer encodes the noisy latent to capture lower-frequency semantics before decoding higher-frequency details. However, this semantics encoding process inevitably attenuates high-frequency information, creating an optimization dilemma. This observation motivates our proposal to decouple the conventional decode-only diffusion transformer into an explicit encoder-decoder architecture." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.856, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Lemma 1. For a linear flow-matching noise scheduler at timestep \\( t \\), let us denote \\( K_{\\text{freq}} \\) as the maximum frequency of the clean data \\( \\mathbf{x}_{\\text{data}} \\). The maximum retained frequency" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.274, + 0.106 + ], + "angle": 0, + "content": "in the noisy latent satisfies:" + }, + { + "type": "equation", + "bbox": [ + 0.156, + 0.116, + 0.483, + 0.158 + ], + "angle": 0, + "content": "\\[\nf _ {\\max } (t) > \\min \\left(\\left(\\frac {t}{1 - t}\\right) ^ {2}, K _ {\\text {f r e q}}\\right). \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.168, + 0.484, + 0.44 + ], + "angle": 0, + "content": "Lemma 1 is directly borrowed from [11, 37], we place the proof of Lemma 1 in Appendix. According to Lemma 1, as \\( t \\) increases to less noisy timesteps, semantic encoding becomes easier (due to noise reduction) while decoding complexity increases (as residual frequencies grow). 
Consider the worst-case scenario at denoising step \\( t \\): the diffusion transformer encodes frequencies up to \\( f_{max}(t) \\); to progress to step \\( s \\), it must decode a residual frequency of at least \\( f_{max}(s) - f_{max}(t) \\). Failure to decode these residual frequencies at step \\( t \\) creates a critical bottleneck for progression to subsequent steps. From this perspective, if allocating more of the calculations to more noisy timesteps leads to an improvement, it means that diffusion transformers struggle with encoding lower frequencies to provide semantics. Otherwise, if allocating more of the calculations to less noisy timesteps leads to an improvement, it means that flow-matching transformers struggle with decoding higher frequencies to provide fine details." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.44, + 0.484, + 0.635 + ], + "angle": 0, + "content": "To figure out the bottlenecks of current diffusion models, we conducted a targeted experiment using SiT-XL/2 with a second-order Adams-like linear multistep solver. As shown in Fig. 4, by varying the time-shift values, we demonstrate that allocating more computation to early timesteps improves final performance compared to uniform scheduling. This reveals that diffusion models face challenges in more noisy steps. This leads to a key conclusion: Current diffusion transformers are fundamentally constrained by their low-frequency semantic encoding capacity. This insight motivates the exploration of encoder-decoder architectures with strategic encoder parameter allocation." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.636, + 0.484, + 0.743 + ], + "angle": 0, + "content": "Prior research further supports this perspective. While lightweight diffusion MLP heads demonstrate limited decoding capacity, MAR [28] overcomes this limitation through semantic latents produced by its masked backbones, enabling high-quality image generation. Similarly, REPA [52] enhances low-frequency encoding through alignment with pre-trained vision foundations [35]." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.755, + 0.182, + 0.77 + ], + "angle": 0, + "content": "4. Method" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Our decoupled diffusion transformer architecture comprises a condition encoder and a velocity decoder. The condition encoder extracts the low-frequency component from the noisy input, class label, and timestep to serve as a self-condition for the velocity decoder; the velocity decoder processes the noisy latent with the self-condition to regress the high-frequency velocity. We train this model using the established linear flow diffusion framework. For brevity," + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.907, + 0.122 + ], + "angle": 0, + "content": "we designate our model as DDT (Decoupled Diffusion Transformer)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.131, + 0.697, + 0.145 + ], + "angle": 0, + "content": "4.1. Condition Encoder" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.153, + 0.907, + 0.258 + ], + "angle": 0, + "content": "The condition encoder mirrors the architectural design and input structure of DiT/SiT with improved micro-design. It is built with interleaved Attention and FFN blocks, without long residual connections. 
The encoder processes three inputs, the noisy latent \\( \\boldsymbol{x}_t \\), timestep \\( t \\), and class label \\( y \\) to extract the self-condition feature \\( \\boldsymbol{z}_t \\) through a series of stacked Attention and FFN blocks:" + }, + { + "type": "equation", + "bbox": [ + 0.625, + 0.27, + 0.907, + 0.288 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {z} _ {t} = \\operatorname {E n c o d e r} \\left(\\boldsymbol {x} _ {t}, t, y\\right). \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.297, + 0.906, + 0.401 + ], + "angle": 0, + "content": "Specifically, the noisy latent \\( \\boldsymbol{x}_t \\) are patched into continuous tokens and then fed to extract the self-condition \\( \\boldsymbol{z}_t \\) with aforementioned encoder blocks. The timestep \\( t \\) and class label \\( y \\) serve as external-conditioning information projected into embedding. These external-condition embeddings are progressively injected into the encoded features of \\( \\boldsymbol{x}_t \\) using AdaLN-Zero[36] within each encoder block." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.403, + 0.906, + 0.506 + ], + "angle": 0, + "content": "To maintain local consistency of \\( z_{t} \\) across adjacent timesteps, we adopt the representation alignment technique from REPA [52]. Shown in Eq. (3), this method aligns the intermediate feature \\( \\mathbf{h}_i \\) from the \\( i \\)-th layer in the self-mapping encoder with the DINOV2 representation \\( r_* \\). Consistent to REPA [52], the \\( h_{\\phi} \\) is the learnable projection MLP:" + }, + { + "type": "equation", + "bbox": [ + 0.613, + 0.508, + 0.905, + 0.526 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {e n c} = 1 - \\cos \\left(r _ {*}, h _ {\\phi} \\left(\\mathbf {h} _ {\\mathbf {i}}\\right)\\right). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.53, + 0.906, + 0.635 + ], + "angle": 0, + "content": "This simple regularization accelerates training convergence, as shown in REPA [52], and facilitates local consistency of \\( \\boldsymbol{z}_t \\) between adjacent steps. It allows sharing the self-condition \\( \\boldsymbol{z}_t \\) produced by the encoder between adjacent steps. Our experiments demonstrate that this encoder-sharing strategy significantly enhances inference efficiency with only negligible performance degradation." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.636, + 0.906, + 0.666 + ], + "angle": 0, + "content": "Additionally, the encoder also receives indirect supervision from the decoder, which we elaborate on later." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.675, + 0.681, + 0.69 + ], + "angle": 0, + "content": "4.2. Velocity Decoder" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.697, + 0.907, + 0.832 + ], + "angle": 0, + "content": "The velocity decoder adopts the same architectural design as the condition encoder and consists of several stacked interleaved Attention and FFN blocks, akin to DiT/SiT. It takes the noisy latent \\( \\boldsymbol{x}_t \\), timestep \\( t \\), and self-conditioning \\( \\boldsymbol{z}_t \\) as inputs to estimate the velocity \\( \\boldsymbol{v}_t \\). Unlike the encoder, we assume that class label information is already embedded within \\( \\boldsymbol{z}_t \\). 
+ { + "type": "title", + "bbox": [ + 0.513, + 0.675, + 0.681, + 0.69 + ], + "angle": 0, + "content": "4.2. Velocity Decoder" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.697, + 0.907, + 0.832 + ], + "angle": 0, + "content": "The velocity decoder adopts the same architectural design as the condition encoder and consists of several stacked interleaved Attention and FFN blocks, akin to DiT/SiT. It takes the noisy latent \( \boldsymbol{x}_t \), the timestep \( t \), and the self-condition \( \boldsymbol{z}_t \) as inputs to estimate the velocity \( \boldsymbol{v}_t \). Unlike in the encoder, we assume that class label information is already embedded within \( \boldsymbol{z}_t \). Thus, only the external-condition timestep \( t \) and the self-condition feature \( \boldsymbol{z}_t \) are used as conditioning inputs for the decoder blocks:" + }, + { + "type": "equation", + "bbox": [ + 0.623, + 0.844, + 0.905, + 0.861 + ], + "angle": 0, + "content": "\[\n\boldsymbol{v}_{t} = \operatorname{Decoder}\left(\boldsymbol{x}_{t}, t, \boldsymbol{z}_{t}\right). \tag{4}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.906, + 0.902 + ], + "angle": 0, + "content": "As demonstrated previously, to further improve the consistency of the self-condition \( z_{t} \) between adjacent steps, we employ" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.486, + 0.138 + ], + "angle": 0, + "content": "AdaLN-Zero [36] to inject \( \mathbf{z}_t \) into the decoder features. The decoder is trained with the flow matching loss shown in Eq. (5):" + }, + { + "type": "equation", + "bbox": [ + 0.156, + 0.147, + 0.483, + 0.182 + ], + "angle": 0, + "content": "\[\n\mathcal{L}_{dec} = \mathbb{E}\left[ \int_{0}^{1} \left\| \left(\boldsymbol{x}_{data} - \epsilon\right) - \boldsymbol{v}_{t} \right\|^{2} \mathrm{d}t \right]. \tag{5}\n\]" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.189, + 0.299, + 0.207 + ], + "angle": 0, + "content": "4.3. Sampling acceleration" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.212, + 0.484, + 0.302 + ], + "angle": 0, + "content": "By incorporating explicit representation alignment into the encoder and implicit self-conditioning injection into the decoder, we achieve local consistency of \( z_{t} \) across adjacent steps during training (as shown in Fig. 5). This enables us to share \( z_{t} \) within a suitable local range, reducing the computational burden on the self-mapping encoder." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.302, + 0.484, + 0.409 + ], + "angle": 0, + "content": "Formally, given a total of \(N\) inference steps and an encoder computation budget of \(K\) (so the sharing ratio is \(1 - \frac{K}{N}\)), we define \(\Phi\) with \(|\Phi| = K\) as the set of timesteps at which the self-condition is recalculated, as shown in Eq. (6). If the current timestep \(t\) is not in \(\Phi\), we reuse the previously computed \(z_{t - \Delta t}\) as \(z_t\). Otherwise, we recompute \(z_t\) using the encoder and the current noisy latent \(x_t\); a sampling-loop sketch follows Eq. (6):" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.418, + 0.483, + 0.46 + ], + "angle": 0, + "content": "\[\n\boldsymbol{z}_{t} = \left\{ \begin{array}{ll} \boldsymbol{z}_{t - \Delta t}, & \text{if } t \notin \Phi \\ \operatorname{Encoder}\left(\boldsymbol{x}_{t}, t, y\right), & \text{if } t \in \Phi \end{array} \right. \tag{6}\n\]" + },
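In a sampler, Eq. (6) is simply a cache around the encoder call. A minimal Euler sampling sketch, where `encoder` and `decoder` are assumed callables matching Eqs. (2) and (4) and `phi` holds the step indices in \( \Phi \):

```python
import torch

@torch.no_grad()
def sample_with_encoder_sharing(encoder, decoder, x, y, timesteps, phi):
    """Euler sampling of the reverse ODE with encoder sharing (Eq. 6).

    encoder(x, t, y) -> z ; decoder(x, t, z) -> v ;
    timesteps: 1D tensor of solver times; phi: set of step indices where
    the self-condition is recomputed (index 0 should always be in phi).
    """
    z = None
    for i in range(len(timesteps) - 1):
        t = timesteps[i]
        if z is None or i in phi:
            z = encoder(x, t, y)                # recompute the self-condition
        v = decoder(x, t, z)                    # velocity under (cached) z
        x = x + (timesteps[i + 1] - t) * v      # Euler step
    return x
```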
+ { + "type": "text", + "bbox": [ + 0.09, + 0.475, + 0.484, + 0.643 + ], + "angle": 0, + "content": "Uniform Encoder Sharing. This naive approach recalculates the self-condition \( z_{t} \) every \( \frac{N}{K} \) steps. Previous work, such as DeepCache [33], uses this handcrafted uniform \( \Phi \) set to accelerate UNet models. However, UNet models, trained solely with a denoising loss and lacking robust representation alignment, exhibit less regularized local consistency in deeper features across adjacent steps than our DDT model. In contrast, we propose a simple statistical dynamic programming algorithm that constructs the optimal \( \Phi \) set, rather than relying on the naive handcrafted choice [33]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.659, + 0.484, + 0.8 + ], + "angle": 0, + "content": "Statistical Dynamic Programming. We construct a statistical similarity matrix \( \mathbf{S} \in \mathbb{R}^{N \times N} \) of \( z_{t} \) across different steps using cosine similarity. The optimal \( \Phi \) set guarantees that the total similarity cost \( -\sum_{k}^{K} \sum_{i = \Phi_{k}}^{\Phi_{k + 1}} S[\Phi_{k}, i] \) attains the global minimum. This is a classic minimum-sum path problem and can be solved by dynamic programming. As shown in Eqs. (7) and (8), we denote \( \mathbf{C}_{i}^{k} \) as the cost and \( \mathbf{P}_{i}^{k} \) as the traced path when \( \Phi_{k} = i \). The state transition from \( \mathbf{C}_{j}^{k - 1} \) to \( \mathbf{C}_{i}^{k} \) follows:" + }, + { + "type": "equation", + "bbox": [ + 0.15, + 0.81, + 0.483, + 0.841 + ], + "angle": 0, + "content": "\[\n\mathbf{C}_{i}^{k} = \min_{j = 0}^{i} \left\{\mathbf{C}_{j}^{k - 1} - \sum_{l = j}^{i} \mathbf{S}[j, l] \right\}. \tag{7}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.152, + 0.842, + 0.483, + 0.863 + ], + "angle": 0, + "content": "\[\n\mathbf{P}_{i}^{k} = \operatorname{argmin}_{j = 0}^{i} \left\{\mathbf{C}_{j}^{k - 1} - \sum_{l = j}^{i} \mathbf{S}[j, l] \right\}. \tag{8}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.483, + 0.903 + ], + "angle": 0, + "content": "After obtaining the cost matrix \(\mathbf{C}\) and the traced path \(\mathbf{P}\), the optimal \(\Phi\) can be solved by backtracking \(\mathbf{P}\) from \(\mathbf{P}_N^K\)." + },
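A minimal NumPy sketch of this dynamic program, written for clarity rather than speed (O(K N^2) after an O(N^2) segment-cost precomputation). Variable names are our own, and we assume the first solver step always recomputes \( z \); this is not the authors' implementation.

```python
import numpy as np

def solve_phi(S: np.ndarray, K: int) -> list[int]:
    """Pick the K recomputation steps of Eq. (6) via Eqs. (7)-(8):
    minimize the summed negative similarity between each cached z and
    the steps that reuse it.

    S: (N, N) cosine-similarity matrix of z across solver steps.
    Returns sorted step indices; step 0 is always recomputed.
    """
    N = S.shape[0]
    # seg[j][e]: cost of caching z at step j and reusing it through step e.
    seg = np.zeros((N, N))
    for j in range(N):
        acc = 0.0
        for e in range(j, N):
            acc -= S[j, e]
            seg[j, e] = acc
    INF = float("inf")
    C = np.full((K + 1, N), INF)           # C[k][e]: best cost for steps 0..e
    back = np.zeros((K + 1, N), dtype=int)
    C[1] = seg[0]                          # the first segment starts at step 0
    for k in range(2, K + 1):
        for e in range(N):
            for j in range(1, e + 1):      # start of the k-th segment
                c = C[k - 1][j - 1] + seg[j][e]
                if c < C[k][e]:
                    C[k][e] = c
                    back[k][e] = j
    # Backtrack from the full horizon with K segments.
    phi, e = [], N - 1
    for k in range(K, 1, -1):
        j = back[k][e]
        phi.append(j)
        e = j - 1
    phi.append(0)
    return sorted(phi)
```

The returned indices can be passed directly as `phi` to the sampling loop sketched after Eq. (6).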
+ { + "type": "title", + "bbox": [ + 0.514, + 0.09, + 0.64, + 0.108 + ], + "angle": 0, + "content": "5. Experiment" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.115, + 0.907, + 0.312 + ], + "angle": 0, + "content": "We conduct experiments on the ImageNet \(256 \times 256\) dataset. The total training batch size is set to 256. Consistent with methodological approaches such as SiT [32], DiT [36], and REPA [52], we employ the Adam optimizer with a constant learning rate of 0.0001 throughout the entire training process. To ensure a fair comparative analysis, we do not use gradient clipping or learning-rate warm-up. Our default training infrastructure consists of \(16 \times\) or \(8 \times\) A100 GPUs. For sampling, we take the Euler solver with 250 steps as the default choice. As for the VAE, we take the off-the-shelf VAE-ft-EMA with a downsample factor of 8 from Huggingface1. We report FID [18], sFID [34], IS [39], Precision, and Recall [25]." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.319, + 0.7, + 0.335 + ], + "angle": 0, + "content": "5.1. Improved baselines" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.341, + 0.907, + 0.552 + ], + "angle": 0, + "content": "Recent architectural improvements such as SwiGLU [46, 47], RoPE [42], and RMSNorm [46, 47] have been extensively validated in the research community [8, 31, 50]. Additionally, lognorm sampling [12] has demonstrated significant benefits for training convergence. Consequently, we developed improved baseline models by incorporating these techniques, drawing inspiration from recent works in the field. The performance of these improved baselines is reported in Tab. 2. To validate the reliability of our implementation, we also reproduced the results for REPA-B/2, achieving metrics that marginally exceed those originally reported in REPA [52]. These reproduction results provide additional confidence in the robustness of our approach." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.553, + 0.906, + 0.645 + ], + "angle": 0, + "content": "The improved baselines in Tab. 2 consistently outperform their predecessors without REPA. However, once REPA is applied, performance rapidly approaches a saturation point. This is particularly evident at the XL model size, where incremental technique improvements yield diminishing gains." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.65, + 0.809, + 0.667 + ], + "angle": 0, + "content": "5.2. Metric comparison with baselines" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.672, + 0.907, + 0.869 + ], + "angle": 0, + "content": "We present the performance of different-size models at 400K training steps in Tab. 2. Our decoupled diffusion transformer (DDT) family demonstrates consistent and significant improvements across model sizes. Our DDT-B/2 (8En4De) model exceeds Improved-REPA-B/2 by 2.8 FID, and our DDT-XL/2 (22En6De) exceeds REPA-XL/2 by 1.3 FID. While decoder-only diffusion transformers approach performance saturation with REPA [52], our DDT models continue to deliver superior results. Incremental technique improvements show diminishing gains, particularly at larger model sizes; our DDT models nevertheless maintain a significant performance advantage, underscoring the effectiveness of our approach." + }, + { + "type": "page_footnote", + "bbox": [ + 0.513, + 0.875, + 0.905, + 0.9 + ], + "angle": 0, + "content": "1https://huggingface.co/stabilityai/sd-vae-ft-ema" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.14, + 0.089, + 0.856, + 0.422 + ], + "angle": 0, + "content": "
ParamsEpochs256×256, w/o CFG256×256, w/ CFG
FID↓IS↑Pre.↑Rec.↑FID↓IS↑Pre.↑Rec.↑
MAR-B [28]208M8003.48192.40.780.582.31281.70.820.57
CausalFusion [9]368M8005.12166.10.730.661.94264.40.820.59
LDM-4 [38]400M17010.56103.50.710.623.6247.70.870.48
DDT-L (Ours)458M807.98128.10.680.671.64310.50.810.61
MAR-L [28]479M8002.6221.40.790.601.78296.00.810.60
VAVAE [50]675M8002.17205.60.770.651.35295.30.790.65
CausalFusion [9]676M8003.61180.90.750.661.77282.30.820.61
ADM [10]554M40010.94-0.690.634.59186.70.820.52
DiT-XL [36]675M14009.62121.50.670.672.27278.20.830.57
SiT-XL [32]675M14008.3---2.06270.30.820.59
ViT-XL [16]451M4008.10---2.06---
U-ViT-H/2 [2]501M4006.58---2.29263.90.820.57
MaskDiT [14]675M16005.69178.00.740.602.28276.60.800.61
FlowDCN [48]618M4008.36122.50.690.652.00263.10.820.58
RDM [44]553M/5.27153.40.750.621.99260.40.810.58
REPA [52]675M8005.9157.80.700.691.42305.70.800.64
DDT-XL (Ours)675M806.62135.20.690.671.52263.70.780.63
DDT-XL (Ours)675M2566.30146.70.680.681.31308.10.780.62
DDT-XL (Ours)675M4006.27154.70.680.691.26310.60.790.65
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.424, + 0.907, + 0.455 + ], + "angle": 0, + "content": "Table 1. System performance comparison on ImageNet \\( {256} \\times {256} \\) class-conditioned generation. Gray blocks mean the algorithm uses VAE trained or fine-tuned on ImageNet instead of the off-shelf SD-VAE-f8d4-ft-ema." + }, + { + "type": "table", + "bbox": [ + 0.108, + 0.465, + 0.469, + 0.751 + ], + "angle": 0, + "content": "
ModelFID↓sFID↓IS↑Prec.↑Rec.↑
SiT-B/2 [32]33.06.4643.70.530.63
REPA-B/2 [52]24.46.4059.90.590.65
REPA-B/2 (Reproduced)22.27.5069.10.590.65
DDT-B/2† (8En4De)21.17.8173.00.600.65
Improved-SiT-B/225.16.5458.80.570.64
Improved-REPA-B/219.16.8876.490.600.66
DDT-B/2 (8En4De)16.326.6386.00.620.66
SiT-L/2 [32]18.85.2972.00.640.64
REPA-L/2 [52]10.05.20109.20.690.65
Improved-SiT-L/212.75.4895.70.650.65
Improved-REPA-L/29.35.44116.60.670.66
DDT-L/2 (20En4De)7.985.50128.10.680.67
SiT-XL/2 [32]17.25.0776.520.650.63
REPA-XL/2 [52]7.95.06122.60.700.65
Improved-SiT-XL/210.95.3103.40.660.65
Improved-REPA-XL/28.145.34124.90.680.67
DDT-XL/2 (22En6De)6.624.86135.10.690.67
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.761, + 0.483, + 0.873 + ], + "angle": 0, + "content": "Table 2. Metrics of \\(400K\\) training steps with different model sizes. All results are reported without classifier-free guidance. gray means metrics are copied from the original paper, otherwise it is produced by our codebase. By default, our DDT models are built on improved baselines. \\(\\mathrm{DDT}^{\\dagger}\\) means model built on naive baseline without architecture improvement and lognorm sampling, consistent to REPA. Our DDT models consistently outperformed their counterparts." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.468, + 0.744, + 0.485 + ], + "angle": 0, + "content": "5.3. System level comparison" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.49, + 0.907, + 0.763 + ], + "angle": 0, + "content": "ImageNet \\(256 \\times 256\\). We report the final metrics of DDT-XL/2 (22En6De) and DDT-L/2 (20En4De) at Tab. 1. Our DDT models demonstrate exceptional efficiency, achieving convergence in approximately \\(\\frac{1}{4}\\) of the total epochs compared to REPA [52] and other diffusion transformer models. In order to maintain methodological consistency with REPA, we employed the classifier-free guidance with 2.0 in the interval [0.3, 1]. Our models delivered impressive results: DDT-L/2 achieved 1.64 FID, and DDT-XL/2 got 1.52 FID within just 80 epochs. By extending training to 256 epochs—still significantly more efficient than traditional 800-epoch approaches—our DDT-XL/2 established a new state-of-the-art benchmark of 1.31 FID on ImageNet \\(256 \\times 256\\), decisively outperforming previous diffusion transformer methodologies. To extend training to 400 epochs, our DDT-XL/2(22En6De) achieves 1.26 FID, nearly reaching the upper limit of SD-VAE-ft-EMA-f8d4, which has a 1.20 rFID on ImageNet256." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.909, + 0.903 + ], + "angle": 0, + "content": "ImageNet \\(512 \\times 512\\) We provide the final metrics of DDT-XL/2 at Tab. 3. To validate the superiority of our DDT model, we take our DDT-XL/2 trained on ImageNet \\(256 \\times\\) 256 under 256 epochs as the initialization, fine-tune out DDT-XL/2 on ImageNet \\(512 \\times 512\\) for \\(100K\\) steps. We adopt the aforementioned interval guidance [26] and we achieved a remarkable state-of-the-art performance of 1.90 FID, decisively outperforming REPA by a significant 0.28" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.506, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.1, + 0.089, + 0.477, + 0.27 + ], + "angle": 0, + "content": "
ImageNet 512 × 512
ModelFID↓sFID↓IS↑Pre.↑Rec.↑
BigGAN-deep [3]8.438.13177.900.880.29
StyleGAN-XL [40]2.414.06267.750.770.52
ADM-G [10]7.726.57172.710.870.42
ADM-G, ADM-U3.855.86221.720.840.53
DiT-XL/2 [36]3.045.02240.820.840.54
SiT-XL/2 [32]2.624.18252.210.840.57
REPA-XL/2 [52]2.084.19274.60.830.58
FlowDCN-XL/2 [48]2.444.53252.80.840.54
DDT-XL/2 (500K)1.284.22305.10.800.63
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.279, + 0.483, + 0.35 + ], + "angle": 0, + "content": "Table 3. Benchmarking class-conditional image generation on ImageNet \\(512 \\times 512\\). Our DDT-XL/2(512 × 512) is fine-tuned from the same model trained on \\(256 \\times 256\\) resolution setting of 1.28M steps. We adopt the interval guidance with interval [0.3, 1] and CFG of 3.0" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.354, + 0.484, + 0.432 + ], + "angle": 0, + "content": "performance margin. In Tab. 3, some metrics exhibit subtle degradation, we attribute this to potentially insufficient fine-tuning. When allocating more training iterations to DDT-XL/2, it achieves 1.28 FID at 500K steps with CFG3.0 within the time interval [0.3, 1.0]." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.442, + 0.382, + 0.459 + ], + "angle": 0, + "content": "5.4. Acceleration by Encoder sharing" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.465, + 0.484, + 0.542 + ], + "angle": 0, + "content": "As illustrated in Fig. 5, there is a strong local consistency of the self-condition in our condition encoder. Even \\( \\boldsymbol{z}_{t=0} \\) has a strong similarity above 0.8 with \\( \\boldsymbol{z}_{t=1} \\). This consistency provides an opportunity to speed up inference by sharing the encoder between adjacent steps." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.542, + 0.484, + 0.738 + ], + "angle": 0, + "content": "We employed the simple uniform encoder sharing strategy and the new novel statistics dynamic programming strategy. Specifically, for the uniform strategy, we only recalculate the self-condition \\( z_{t} \\) every \\( K \\) steps. For statistics dynamic programming, we solve the aforementioned minimal sum path on the similarity matrix by dynamic programming and recalculate \\( z_{t} \\) according to the solved strategy. As shown in Fig. 6, there is a significant inference speedup nearly without visual quality loss when \\( K \\) is smaller than 6. As shown in Tab. 4, the metrics loss is still marginal, while the inference speedup is significant. The novel statistics dynamic programming slightly outperformed the naive uniform strategy with less FID drop." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.749, + 0.203, + 0.765 + ], + "angle": 0, + "content": "5.5. Ablations" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.772, + 0.483, + 0.849 + ], + "angle": 0, + "content": "We conduct ablation studies on ImageNet \\(256 \\times 256\\) with DDT-B/2 and DDT-L/2. For sampling, we take the Euler solver with 250 steps as the default choice without classifier-free guidance. For training, we train each model with 80 epochs(400k steps), and the batch size is set to 256." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Encoder-Decoder Ratio we systematically explored ratios ranging from \\(2:1\\) to \\(5:1\\) across different model sizes." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.089, + 0.905, + 0.278 + ], + "angle": 0, + "content": "
Sharing RatioAcc.ΦFID↓sFID↓IS↑Prec.↑Rec.↑
0.001.0×Uniform1.314.62308.10.780.66
0.501.6×Uniform1.314.48300.50.780.65
0.661.9×Uniform1.324.46301.20.780.65
0.752.3×Uniform1.344.43302.70.780.65
0.802.6×Uniform1.364.40303.30.780.64
StatisticDP1.334.37301.70.780.64
0.832.7×Uniform1.374.41302.80.780.64
StatisticDP1.364.35300.30.780.64
0.873.0×Uniform1.424.43302.80.780.64
StatisticDP1.404.35302.40.780.64
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.287, + 0.907, + 0.358 + ], + "angle": 0, + "content": "Table 4. Metrics of \\( {400K} \\) training steps with different model sizes. All results are reported without classifier-free guidance. gray means metrics are copied from the original paper, otherwise it is produced by our codebase. Our DDT models consistently outperformed its counterparts" + }, + { + "type": "image", + "bbox": [ + 0.529, + 0.367, + 0.909, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.619, + 0.907, + 0.661 + ], + "angle": 0, + "content": "Figure 5. The cosine similarity of self-condition feature \\( z_{t} \\) from encoder between different timesteps. There is a strong correlation between adjacent steps, indicating the redundancy." + }, + { + "type": "image", + "bbox": [ + 0.524, + 0.667, + 0.901, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.806, + 0.907, + 0.864 + ], + "angle": 0, + "content": "Figure 6. Sharing the self-condition \\( z_{t} \\) in adjacent steps significant speedup the inference. We tried various sharing frequency configurations. There is marginal visual quality down-gradation when the sharing frequency is reasonable." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.907, + 0.903 + ], + "angle": 0, + "content": "in Fig. 7 and Fig. 8. Our notation \\(m\\mathrm{En}n\\mathrm{De}\\) represents models with \\(m\\) encoder layers and \\(n\\) decoder layers. The inves" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.107, + 0.092, + 0.363, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.092, + 0.631, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.64, + 0.098, + 0.897, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.256, + 0.905, + 0.283 + ], + "angle": 0, + "content": "Figure 7. The DDT-B/2 built upon Improved-baselines under various Encoder and Decoder layer ratio. DDT-B/2(8En4De) achieves much faster convergence speed and better performance." + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.284, + 0.363, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.285, + 0.63, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.64, + 0.288, + 0.897, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.446, + 0.905, + 0.474 + ], + "angle": 0, + "content": "Figure 8. The DDT-L/2 built upon Improved-baselines under various Encoder and Decoder layer ratio. DDT-L/2 prefers an unexpected aggressive encoder-decoder ratio DDT-L/2(20En4De) achieves much faster convergence speed and better performance." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.483, + 0.483, + 0.679 + ], + "angle": 0, + "content": "tigation experiments in Fig. 7 and Fig. 8 revealed critical insights into architectural optimization. We observed that a larger encoder is beneficial for further improving the performance as the model size increases. For the Base model in Fig. 7, the optimal configuration emerged as 8 encoder layers and 4 decoder layers, delivering superior performance and convergence speed. Notably, the Large model in Fig. 
Notably, the Large model in Fig. 8 exhibited a distinct preference, achieving peak performance with 20 encoder layers and 4 decoder layers, an unexpectedly aggressive encoder-decoder ratio. This discovery motivates us to scale the layer ratio in DDT-XL/2 to 22 encoder layers and 6 decoder layers to explore the performance upper limits of diffusion transformers." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.697, + 0.483, + 0.834 + ], + "angle": 0, + "content": "Decoder Block Types. In our investigation of decoder block types and their impact on high-frequency decoding performance, we systematically evaluated multiple architectural configurations. Our assessment included alternative approaches such as simple \(3 \times 3\) convolution blocks and naive MLP blocks. As shown in Tab. 5, the default setting (Attention with MLP) achieves the best results. Thanks to the encoder-decoder design, even naive Conv blocks achieve comparable results." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.848, + 0.21, + 0.863 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.872, + 0.483, + 0.901 + ], + "angle": 0, + "content": "In this paper, we have introduced a novel Decoupled Diffusion Transformer, which rethinks the optimization dilemma" + }, + { + "type": "table", + "bbox": [ + 0.554, + 0.48, + 0.868, + 0.553 + ], + "angle": 0, + "content": "
Decoder BlockFID↓sFID↓IS↑Prec.↑Rec.↑
Conv+MLP16.967.3385.10.620.65
MLP+MLP24.137.8965.00.570.65
Attn+MLP16.326.6386.00.620.66
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.562, + 0.907, + 0.618 + ], + "angle": 0, + "content": "Table 5. Metrics of \\(400K\\) training steps on DDT-B/2(8En4De) with different decoder blocks. All results are reported without classifier-free guidance. The Default Attention + MLP configuration achieves best performance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.637, + 0.907, + 0.903 + ], + "angle": 0, + "content": "of the traditional diffusion transformer. By decoupling the low-frequency encoding and high-frequency decoding into dedicated components, we effectively resolved the optimization dilemma that has constrained diffusion transformer. Furthermore, we discovered that increasing the encoder capacity relative to the decoder yields increasingly beneficial results as the overall model scale grows. This insight provides valuable guidance for future model scaling efforts. Our experiments demonstrate that our DDT-XL/2 (22En6De) with an unexpected aggressive encoder-decoder layer ratio achieves great performance while requiring only 256 training epochs. This significant improvement in efficiency addresses one of the primary limitations of diffusion models: their lengthy training requirements. The decoupled architecture also presents opportunities for inference optimization through our proposed encoder result sharing mechanism. Our statistical dynamic programming approach for determining optimal sharing strategies enables faster inference while minimizing quality" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.486, + 0.123 + ], + "angle": 0, + "content": "degradation, demonstrating that architectural innovations can yield benefits beyond their primary design objectives." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.147, + 0.188, + 0.162 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.173, + 0.483, + 0.24 + ], + "angle": 0, + "content": "[1] Niket Agarwal, Arslan Ali, Maciej Bala, Yogesh Balaji, Erik Barker, Tiffany Cai, Prithvijit Chattopadhyay, Yongxin Chen, Yin Cui, Yifan Ding, et al. Cosmos world foundation model platform for physical ai. arXiv preprint arXiv:2501.03575, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.242, + 0.483, + 0.311 + ], + "angle": 0, + "content": "[2] Fan Bao, Shen Nie, Kaiwen Xue, Yue Cao, Chongxuan Li, Hang Su, and Jun Zhu. All are worth words: A vit backbone for diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22669-22679, 2023. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.313, + 0.482, + 0.354 + ], + "angle": 0, + "content": "[3] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale gan training for high fidelity natural image synthesis. arXiv preprint arXiv:1809.11096, 2018. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.355, + 0.482, + 0.41 + ], + "angle": 0, + "content": "[4] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.411, + 0.482, + 0.478 + ], + "angle": 0, + "content": "[5] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. 
Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.481, + 0.482, + 0.549 + ], + "angle": 0, + "content": "[6] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart- \\(\\cdot\\) alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.551, + 0.483, + 0.619 + ], + "angle": 0, + "content": "[7] Junsong Chen, Chongjian Ge, Enze Xie, Yue Wu, Lewei Yao, Xiaozhe Ren, Zhongdao Wang, Ping Luo, Huchuan Lu, and Zhenguo Li. Pixart- \\(\\backslash\\) sigma: Weak-to-strong training of diffusion transformer for 4k text-to-image generation. arXiv preprint arXiv:2403.04692, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.621, + 0.482, + 0.661 + ], + "angle": 0, + "content": "[8] Xiangxiang Chu, Jianlin Su, Bo Zhang, and Chunhua Shen. Visionllama: A unified llama interface for vision tasks. arXiv preprint arXiv:2403.00522, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.663, + 0.482, + 0.704 + ], + "angle": 0, + "content": "[9] Chaorui Deng, Deyao Zh, Kunchang Li, Shi Guan, and Haoqi Fan. Causal diffusion transformers for generative modeling. arXiv preprint arXiv:2412.12095, 2024. 6, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.705, + 0.482, + 0.746 + ], + "angle": 0, + "content": "[10] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 2, 6, 7, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.747, + 0.482, + 0.773 + ], + "angle": 0, + "content": "[11] Sander Dieleman. Diffusion is spectral autoregression, 2024. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.776, + 0.483, + 0.845 + ], + "angle": 0, + "content": "[12] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. arXiv preprint arXiv:2403.03206, 2024. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.846, + 0.483, + 0.9 + ], + "angle": 0, + "content": "[13] Zhengcong Fei, Mingyuan Fan, Changqian Yu, Debang Li, and Junshi Huang. Diffusion-rwkv: Scaling rwkv-like architectures for diffusion models. arXiv preprint arXiv:2404.04478, 2024. 2" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.173, + 0.483, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.161 + ], + "angle": 0, + "content": "[14] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 23164-23173, 2023. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.163, + 0.905, + 0.232 + ], + "angle": 0, + "content": "[15] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23164-23173, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.233, + 0.905, + 0.302 + ], + "angle": 0, + "content": "[16] Tiankai Hang, Shuyang Gu, Chen Li, Jianmin Bao, Dong Chen, Han Hu, Xin Geng, and Baining Guo. Efficient diffusion training via min-snr weighting strategy. In Proceedings of the IEEE/CVF international conference on computer vision, pages 7441-7451, 2023. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.302, + 0.905, + 0.37 + ], + "angle": 0, + "content": "[17] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.372, + 0.905, + 0.44 + ], + "angle": 0, + "content": "[18] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.442, + 0.905, + 0.483 + ], + "angle": 0, + "content": "[19] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.484, + 0.905, + 0.537 + ], + "angle": 0, + "content": "[20] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.539, + 0.905, + 0.593 + ], + "angle": 0, + "content": "[21] Tero Karras, Miika Aittala, Timo Aila, and Samuli Laine. Elucidating the design space of diffusion-based generative models. Advances in Neural Information Processing Systems, 35:26565-26577, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.595, + 0.905, + 0.635 + ], + "angle": 0, + "content": "[22] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.637, + 0.905, + 0.705 + ], + "angle": 0, + "content": "[23] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.706, + 0.905, + 0.773 + ], + "angle": 0, + "content": "[24] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.776, + 0.905, + 0.832 + ], + "angle": 0, + "content": "[25] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.833, + 0.905, + 0.899 + ], + "angle": 0, + "content": "[26] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. 
Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 6" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.486, + 0.147 + ], + "angle": 0, + "content": "[27] Tianhong Li, Dina Katabi, and Kaiming He. Return of unconditional generation: A self-supervised representation generation method. Advances in Neural Information Processing Systems, 37:125441-125468, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.483, + 0.205 + ], + "angle": 0, + "content": "[28] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. Advances in Neural Information Processing Systems, 37:56424-56445, 2025. 2, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.207, + 0.483, + 0.248 + ], + "angle": 0, + "content": "[29] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.25, + 0.483, + 0.291 + ], + "angle": 0, + "content": "[30] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.293, + 0.483, + 0.346 + ], + "angle": 0, + "content": "[31] Zeyu Lu, Zidong Wang, Di Huang, Chengyue Wu, Xihui Liu, Wanli Ouyang, and Lei Bai. Fit: Flexible vision transformer for diffusion model. arXiv preprint arXiv:2402.12376, 2024.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.349, + 0.483, + 0.417 + ], + "angle": 0, + "content": "[32] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. arXiv preprint arXiv:2401.08740, 2024. 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.42, + 0.484, + 0.475 + ], + "angle": 0, + "content": "[33] Xinyin Ma, Gongfan Fang, and Xinchao Wang. Deepcache: Accelerating diffusion models for free. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15762-15772, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.477, + 0.483, + 0.518 + ], + "angle": 0, + "content": "[34] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter W Battaglia. Generating images with sparse representations. arXiv preprint arXiv:2103.03841, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.52, + 0.483, + 0.588 + ], + "angle": 0, + "content": "[35] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.59, + 0.483, + 0.644 + ], + "angle": 0, + "content": "[36] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 
2, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.647, + 0.483, + 0.687 + ], + "angle": 0, + "content": "[37] Severi Rissanen, Markus Heinonen, and Arno Solin. Generative modelling with inverse heat dissipation. arXiv preprint arXiv:2206.13397, 2022. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.69, + 0.484, + 0.758 + ], + "angle": 0, + "content": "[38] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.76, + 0.483, + 0.814 + ], + "angle": 0, + "content": "[39] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.817, + 0.483, + 0.87 + ], + "angle": 0, + "content": "[40] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 conference proceedings, pages 1-10, 2022. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.873, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[41] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.486, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.12 + ], + "angle": 0, + "content": "generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.122, + 0.905, + 0.176 + ], + "angle": 0, + "content": "[42] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.179, + 0.905, + 0.234 + ], + "angle": 0, + "content": "[43] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.236, + 0.905, + 0.29 + ], + "angle": 0, + "content": "[44] Jiayan Teng, Wendi Zheng, Ming Ding, Wenyi Hong, Jianqiao Wangni, Zhuoyi Yang, and Jie Tang. Relay diffusion: Unifying diffusion process across resolutions for image synthesis. arXiv preprint arXiv:2309.03350, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.292, + 0.905, + 0.346 + ], + "angle": 0, + "content": "[45] Yao Teng, Yue Wu, Han Shi, Xuefei Ning, Guohao Dai, Yu Wang, Zhenguo Li, and Xihui Liu. Dim: Diffusion mamba for efficient high-resolution image synthesis. arXiv preprint arXiv:2405.14224, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.349, + 0.905, + 0.417 + ], + "angle": 0, + "content": "[46] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.419, + 0.905, + 0.488 + ], + "angle": 0, + "content": "[47] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.49, + 0.905, + 0.544 + ], + "angle": 0, + "content": "[48] Shuai Wang, Zexian Li, Tianhui Song, Xubin Li, Tiezheng Ge, Bo Zheng, and Limin Wang. Flowdcn: Exploring dcn-like architectures for fast image generation with arbitrary resolution. arXiv preprint arXiv:2410.22655, 2024. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.546, + 0.905, + 0.587 + ], + "angle": 0, + "content": "[49] Jing Nathan Yan, Jiatao Gu, and Alexander M Rush. Diffusion models without attention. arXiv preprint arXiv:2311.18257, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.589, + 0.905, + 0.63 + ], + "angle": 0, + "content": "[50] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.632, + 0.905, + 0.672 + ], + "angle": 0, + "content": "[51] Qihang Yu, Ju He, Xueqing Deng, Xiaohui Shen, and Liang-Chieh Chen. Randomized autoregressive visual generation. arXiv preprint arXiv:2411.00776, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.674, + 0.905, + 0.742 + ], + "angle": 0, + "content": "[52] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 2, 3, 4, 5, 6, 7, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.745, + 0.905, + 0.8 + ], + "angle": 0, + "content": "[53] Xiaoyu Yue, Zidong Wang, Zeyu Lu, Shuyang Sun, Meng Wei, Wanli Ouyang, Lei Bai, and Luping Zhou. Diffusion models need visual priors for image generation. arXiv preprint arXiv:2410.08531, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.802, + 0.905, + 0.869 + ], + "angle": 0, + "content": "[54] Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Lirui Zhao, Fu-Yun Wang, Zhanyu Ma, et al. Lumina-last: Making lumina-t2x stronger and faster with next-dit. arXiv preprint arXiv:2406.18583, 2024. 2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.869 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.09, + 0.226, + 0.108 + ], + "angle": 0, + "content": "A. Model Specs" + }, + { + "type": "table", + "bbox": [ + 0.135, + 0.127, + 0.442, + 0.204 + ], + "angle": 0, + "content": "
Config#LayersHidden dim#Heads
B/21276812
L/224102416
XL/228115216
" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.242, + 0.275, + 0.26 + ], + "angle": 0, + "content": "B. Hyper-parameters" + }, + { + "type": "table", + "bbox": [ + 0.124, + 0.279, + 0.452, + 0.484 + ], + "angle": 0, + "content": "
VAE\nVAE downsample\nlatent channelSD-VAE-f8d4-ft-ema\n8\n4
optimizerAdamW [22]
base learning rate1e-4
weight decay0.0
batch size256
learning rate scheduleconstant
augmentationcenter crop
diffusion samplerEuler-ODE
diffusion steps250
evaluation suiteADM [10]
" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.507, + 0.336, + 0.523 + ], + "angle": 0, + "content": "C. Linear flow and Diffusion" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.532, + 0.385, + 0.548 + ], + "angle": 0, + "content": "Given the SDE forward and reverse process:" + }, + { + "type": "equation", + "bbox": [ + 0.108, + 0.554, + 0.482, + 0.571 + ], + "angle": 0, + "content": "\\[\nd \\boldsymbol {x} _ {t} = f (t) \\boldsymbol {x} _ {t} \\mathrm {d} t + g (t) \\mathrm {d} \\boldsymbol {w} \\tag {9}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.108, + 0.574, + 0.482, + 0.591 + ], + "angle": 0, + "content": "\\[\nd \\boldsymbol {x} _ {t} = [ f (t) \\boldsymbol {x} _ {t} - g (t) ^ {2} \\nabla_ {\\boldsymbol {x}} \\log p (\\boldsymbol {x} _ {t}) ] d t + g (t) d \\boldsymbol {w} \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.598, + 0.483, + 0.642 + ], + "angle": 0, + "content": "A corresponding deterministic process exists with trajectories sharing the same marginal probability densities of reverse SDE." + }, + { + "type": "equation", + "bbox": [ + 0.148, + 0.645, + 0.483, + 0.674 + ], + "angle": 0, + "content": "\\[\nd \\boldsymbol {x} _ {t} = [ f (t) \\boldsymbol {x} _ {t} - \\frac {1}{2} g (t) ^ {2} \\nabla_ {\\boldsymbol {x} _ {t}} \\log p (\\boldsymbol {x} _ {t}) ] d t \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.679, + 0.483, + 0.706 + ], + "angle": 0, + "content": "Given \\( x_{t} = \\alpha_{t}x_{data} + \\sigma \\epsilon \\). The traditional diffusion model learns:" + }, + { + "type": "equation", + "bbox": [ + 0.206, + 0.705, + 0.482, + 0.734 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\boldsymbol {x} _ {t}} \\log p (\\boldsymbol {x} _ {t}) = - \\frac {\\epsilon}{\\sigma (t)} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.735, + 0.483, + 0.765 + ], + "angle": 0, + "content": "The flow-matching framework actually learns the following:" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.773, + 0.482, + 0.806 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\boldsymbol {v} _ {t} = \\dot {\\alpha} x + \\dot {\\sigma} \\epsilon (13) \\\\ = x - \\epsilon (14) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.814, + 0.482, + 0.843 + ], + "angle": 0, + "content": "Here we will demonstrate in flow-matching, the \\( \\boldsymbol{v}_t \\) prediction is actually as same as the reverse ode:" + }, + { + "type": "equation", + "bbox": [ + 0.178, + 0.851, + 0.482, + 0.899 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\dot {\\alpha} x + \\dot {\\sigma} \\epsilon (15) \\\\ = f (t) \\boldsymbol {x} _ {t} - \\frac {1}{2} g (t) ^ {2} \\nabla_ {\\boldsymbol {x} _ {t}} \\log p (\\boldsymbol {x} _ {t}) (16) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.092, + 0.838, + 0.107 + ], + "angle": 0, + "content": "Let us start by expanding the reverse ode first." 
+ }, + { + "type": "equation", + "bbox": [ + 0.547, + 0.124, + 0.905, + 0.223 + ], + "angle": 0, + "content": "\[\n\begin{array}{ll} f(t)\boldsymbol{x}_{t} - \frac{1}{2}g(t)^{2}\nabla_{\boldsymbol{x}_{t}}\log p(\boldsymbol{x}_{t}) & (17) \\ = f(t)(\alpha(t)\boldsymbol{x}_{data} + \sigma(t)\epsilon) - \frac{1}{2}g(t)^{2}\left[-\frac{\epsilon}{\sigma(t)}\right] & (18) \\ = f(t)\alpha(t)\boldsymbol{x}_{data} + \left(f(t)\sigma(t) + \frac{1}{2}\frac{g(t)^{2}}{\sigma(t)}\right)\epsilon & (19) \end{array}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.243, + 0.855, + 0.258 + ], + "angle": 0, + "content": "To prove Eq. (16), we need to demonstrate that:" + }, + { + "type": "equation", + "bbox": [ + 0.625, + 0.277, + 0.905, + 0.293 + ], + "angle": 0, + "content": "\[\n\dot{\alpha}(t) = f_{t}\alpha(t) \tag{20}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.627, + 0.297, + 0.905, + 0.33 + ], + "angle": 0, + "content": "\[\n\dot{\sigma}(t) = f_{t}\sigma(t) + \frac{1}{2}\frac{g_{t}^{2}}{\sigma(t)}. \tag{21}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.352, + 0.905, + 0.397 + ], + "angle": 0, + "content": "First, let us derive the relation between \( f_{t} \) and \( \alpha(t) \), \( \dot{\alpha}(t) \). We denote \( x_{data}(t) = \alpha(t)x_{data} \) as the remaining component of \( x_{data} \) in \( x_{t} \); it is easy to see that:" + }, + { + "type": "equation", + "bbox": [ + 0.629, + 0.417, + 0.905, + 0.433 + ], + "angle": 0, + "content": "\[\nd\boldsymbol{x}_{data}(t) = f_{t}\boldsymbol{x}_{data}(t)dt \tag{22}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.608, + 0.435, + 0.905, + 0.451 + ], + "angle": 0, + "content": "\[\nd\left(\alpha(t)x_{data}\right) = f_{t}\alpha(t)x_{data}dt \tag{23}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.657, + 0.455, + 0.905, + 0.47 + ], + "angle": 0, + "content": "\[\nd\alpha(t) = f_{t}\alpha(t)dt \tag{24}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.49, + 0.652, + 0.505 + ], + "angle": 0, + "content": "So, Eq. (20) holds." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.508, + 0.906, + 0.554 + ], + "angle": 0, + "content": "Building on this, we now demonstrate the relation of \( g_{t}, f_{t} \) with \( \sigma(t) \). Note that Gaussian noise has a convenient additive property:" + }, + { + "type": "equation", + "bbox": [ + 0.61, + 0.573, + 0.905, + 0.591 + ], + "angle": 0, + "content": "\[\na\epsilon_{1} + b\epsilon_{2} \sim \mathcal{N}\left(0, \sqrt{a^{2} + b^{2}}\right) \tag{25}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.61, + 0.906, + 0.675 + ], + "angle": 0, + "content": "Let us start with the calculation of the Gaussian noise component \(\epsilon(t)\): by the time we reach \(t\), every noise addition at \(s \in [0, t]\) has been decayed by a factor of \(\frac{\alpha(t)}{\alpha(s)}\).
Thus, the mixed Gaussian noise has a standard deviation \(\sigma(t)\) of:" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.694, + 0.905, + 0.735 + ], + "angle": 0, + "content": "\[\n\sigma(t) = \sqrt{\int_{0}^{t}\left[\left(\frac{\alpha(t)}{\alpha(s)}\right)^{2}g_{s}^{2}\right]ds} \tag{26}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.602, + 0.738, + 0.905, + 0.779 + ], + "angle": 0, + "content": "\[\n\sigma(t) = \alpha(t)\sqrt{\int_{0}^{t}\left[\left(\frac{g_{s}}{\alpha(s)}\right)^{2}\right]ds} \tag{27}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.795, + 0.905, + 0.826 + ], + "angle": 0, + "content": "After obtaining the relations of \( f_{t}, g_{t} \) with \( \alpha(t), \sigma(t) \), we derive \( \dot{\alpha}(t) \) and \( \dot{\sigma}(t) \) under the above conditions:" + }, + { + "type": "equation", + "bbox": [ + 0.628, + 0.845, + 0.905, + 0.879 + ], + "angle": 0, + "content": "\[\n\dot{\alpha}(t) = f_{t}\exp\left[\int_{0}^{t}f_{s}ds\right] \tag{28}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.63, + 0.881, + 0.905, + 0.898 + ], + "angle": 0, + "content": "\[\n\dot{\alpha}(t) = f_{t}\alpha(t) \tag{29}\n\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.38, + 0.107 + ], + "angle": 0, + "content": "As for \(\dot{\sigma}(t)\), it is quite complex but not hard:" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.113, + 0.495, + 0.178 + ], + "angle": 0, + "content": "\[\n\dot{\sigma}(t) = \dot{\alpha}(t)\sqrt{\int_{0}^{t}\left[\left(\frac{g_{s}}{\alpha(s)}\right)^{2}\right]ds} + \alpha(t)\frac{\frac{1}{2}\frac{g_{t}^{2}}{\alpha^{2}(t)}}{\sqrt{\int_{0}^{t}\left[\left(\frac{g_{s}}{\alpha(s)}\right)^{2}\right]ds}} \tag{30}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.182, + 0.504, + 0.247 + ], + "angle": 0, + "content": "\[\n\dot{\sigma}(t) = \left(f_{t}\alpha(t)\right)\sqrt{\int_{0}^{t}\left[\left(\frac{g_{s}}{\alpha(s)}\right)^{2}\right]ds} + \alpha(t)\frac{\frac{1}{2}\frac{g_{t}^{2}}{\alpha^{2}(t)}}{\sqrt{\int_{0}^{t}\left[\left(\frac{g_{s}}{\alpha(s)}\right)^{2}\right]ds}} \tag{31}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.251, + 0.491, + 0.315 + ], + "angle": 0, + "content": "\[\n\dot{\sigma}(t) = f_{t}\alpha(t)\sqrt{\int_{0}^{t}\left[\left(\frac{g_{s}}{\alpha(s)}\right)^{2}\right]ds} + \frac{\frac{1}{2}g_{t}^{2}}{\alpha(t)\sqrt{\int_{0}^{t}\left[\left(\frac{g_{s}}{\alpha(s)}\right)^{2}\right]ds}} \tag{32}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.318, + 0.484, + 0.35 + ], + "angle": 0, + "content": "\[\n\dot{\sigma}(t) = f_{t}\sigma(t) + \frac{1}{2}\frac{g_{t}^{2}}{\sigma(t)} \tag{33}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.354, + 0.228, + 0.37 + ], + "angle": 0, + "content": "So, Eq. (21) holds." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.381, + 0.402, + 0.399 + ], + "angle": 0, + "content": "D. Proof of Spectral Autoregression"
 + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.406, + 0.484, + 0.466 + ], + "angle": 0, + "content": "Given the noise scheduler \(\{\alpha_{t},\sigma_{t}\}\), the clean data \(\pmb{x}_{\mathrm{data}}\), and Gaussian noise \(\epsilon\), denote \(K_{freq}\) as the maximum frequency of the clean data \(\pmb{x}_{\mathrm{data}}\). The noisy latent \(x_{t}\) at timestep \(t\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.475, + 0.482, + 0.49 + ], + "angle": 0, + "content": "\[\n\boldsymbol{x}_{t} = \alpha_{t}\boldsymbol{x}_{data} + \sigma_{t}\boldsymbol{\epsilon} \tag{34}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.496, + 0.484, + 0.525 + ], + "angle": 0, + "content": "The spectrum magnitude \( c_{i} \) of \( x_{t} \) on the DCT basis \( \mathbf{u}_{i} \) follows:" + }, + { + "type": "equation", + "bbox": [ + 0.184, + 0.531, + 0.297, + 0.55 + ], + "angle": 0, + "content": "\[\n\boldsymbol{c}_{i} = \mathbb{E}_{\epsilon}\left[(\boldsymbol{u}_{i}^{T}\boldsymbol{x}_{t})^{2}\right]\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.552, + 0.39, + 0.571 + ], + "angle": 0, + "content": "\[\n\boldsymbol{c}_{i} = \mathbb{E}_{\epsilon}\left[(\boldsymbol{u}_{i}^{T}(\alpha_{t}\boldsymbol{x}_{data} + \sigma_{t}\boldsymbol{\epsilon}))^{2}\right]\n\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.577, + 0.484, + 0.607 + ], + "angle": 0, + "content": "Recall that the spectrum magnitude of Gaussian noise \(\epsilon\) is uniformly distributed across frequencies. Expanding the square, the cross term vanishes because \(\mathbb{E}_{\epsilon}[\boldsymbol{u}_{i}^{T}\boldsymbol{\epsilon}] = 0\):" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.626, + 0.495, + 0.646 + ], + "angle": 0, + "content": "\[\n\pmb{c}_{i} = [\alpha_{t}\pmb{u}_{i}^{T}\pmb{x}_{data}]^{2} + 2\alpha_{t}\sigma_{t}\mathbb{E}_{\epsilon}[\pmb{u}_{i}^{T}\pmb{x}_{data}\pmb{u}_{i}^{T}\epsilon] + \sigma_{t}^{2}\mathbb{E}_{\epsilon}[(\pmb{u}_{i}^{T}\epsilon)^{2}]\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.091, + 0.647, + 0.323, + 0.666 + ], + "angle": 0, + "content": "\[\n\boldsymbol{c}_{i} = \left[\alpha_{t}\boldsymbol{u}_{i}^{T}\boldsymbol{x}_{data}\right]^{2} + \sigma_{t}^{2}\mathbb{E}_{\boldsymbol{\epsilon}}\left[(\boldsymbol{u}_{i}^{T}\boldsymbol{\epsilon})^{2}\right]\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.668, + 0.273, + 0.687 + ], + "angle": 0, + "content": "\[\n\boldsymbol{c}_{i} = \alpha_{t}^{2}\left[\boldsymbol{u}_{i}^{T}\boldsymbol{x}_{data}\right]^{2} + \sigma_{t}^{2}\lambda\n\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.693, + 0.484, + 0.755 + ], + "angle": 0, + "content": "If \(\sigma_t^2\lambda\) is larger than \([\alpha_t\pmb{u}_i^T\pmb{x}_{data}]^2\), the spectrum magnitude \(\pmb{c}_i\) on the DCT basis \(\pmb{u}_i\) is overwhelmed by the noise; thus, the maximal remaining frequency \(f_{max}(t)\) of the original data in \(\pmb{x}_t\) satisfies:" + }, + { + "type": "equation", + "bbox": [ + 0.126, + 0.759, + 0.484, + 0.801 + ], + "angle": 0, + "content": "\[\nf_{\max}(t) > \min\left(\left(\frac{\alpha_{t}\boldsymbol{u}_{i}^{T}\boldsymbol{x}_{data}}{\sigma_{t}\lambda}\right)^{2}, K_{freq}\right) \tag{35}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.808, + 0.484, + 0.859 + ], + "angle": 0, + "content": "Though \(\left(\frac{\alpha_t\pmb{u}_i^T\pmb{x}_{data}}{\sigma_t\lambda}\right)^2\) depends on the dataset, here we simply take it to be a constant 1 and substitute \(\alpha = t\) and \(\sigma = 1 - t\) into the above equation:" + }, + { + "type": "equation", + "bbox": [ + 0.159, + 0.864, + 0.484, + 0.906 + ], + "angle": 0, + "content": "\[\nf_{\max}(t) > \min\left(\left(\frac{t}{1 - t}\right)^{2}, K_{freq}\right) \tag{36}\n\]" + },
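This spectral picture can be sanity-checked numerically: mixing a signal with white Gaussian noise drowns the weak high-frequency DCT coefficients first, and the surviving band grows as \( t \) moves toward clean data. A self-contained NumPy/SciPy sketch, using an assumed synthetic 1/f spectrum in place of real images (so the exact exponent differs from Eq. (36)):

```python
import numpy as np
from scipy.fft import dct, idct

rng = np.random.default_rng(0)
n, trials = 256, 200
k = np.arange(1, n + 1)
# Synthetic signal whose DCT amplitudes fall off as 1/f: an assumption
# standing in for the spectrum of natural images.
x = idct(10.0 / k, type=2, norm="ortho")

for t in (0.2, 0.5, 0.8):              # t = 1 clean data, t = 0 pure noise
    alpha, sigma = t, 1.0 - t
    # Average DCT power of x_t = alpha * x + sigma * eps over many draws.
    power = np.mean(
        [dct(alpha * x + sigma * rng.standard_normal(n),
             type=2, norm="ortho") ** 2 for _ in range(trials)],
        axis=0,
    )
    # Count modes whose power clearly exceeds the flat noise floor sigma^2.
    surviving = int((power > 2 * sigma**2).sum())
    print(f"t={t:.1f}: ~{surviving} low-frequency DCT modes remain visible")
```

As \( t \) increases, the printed count grows monotonically, mirroring the claim that denoising reveals progressively higher frequencies.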
0.484, + 0.859 + ], + "angle": 0, + "content": "Though \\(\\frac{\\alpha_t\\pmb{u}_i^T\\pmb{x}_{data}}{\\sigma_t\\lambda}^2\\) depends on the dataset. Here, we directly suppose it as a constant 1. And replace \\(\\alpha = t\\) and \\(\\sigma = 1 - t\\) in above equation:" + }, + { + "type": "equation", + "bbox": [ + 0.159, + 0.864, + 0.484, + 0.906 + ], + "angle": 0, + "content": "\\[\nf _ {\\max } (t) > \\min \\left(\\left(\\frac {t}{1 - t}\\right) ^ {2}, K _ {f r e q}\\right) \\tag {36}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.091, + 0.759, + 0.108 + ], + "angle": 0, + "content": "E. Linear multisteps method" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.115, + 0.906, + 0.176 + ], + "angle": 0, + "content": "We conduct targeted experiment on SiT-XL/2 with Adams-Bashforth like linear multistep solver; To clarify, we did not employ this powerful solver for our DDT models in all tables across the main paper." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.176, + 0.906, + 0.207 + ], + "angle": 0, + "content": "The reverse ode of the diffusion models tackles the following integral:" + }, + { + "type": "equation", + "bbox": [ + 0.602, + 0.217, + 0.906, + 0.252 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} _ {i + 1} = \\boldsymbol {x} _ {i} + \\int_ {t _ {i}} ^ {t _ {i + 1}} \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {t}, t) d t \\tag {37}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.263, + 0.906, + 0.295 + ], + "angle": 0, + "content": "The classic Euler method employs \\( \\pmb{v}_{\\theta}(\\pmb{x}_i, t_i) \\) as an estimate of \\( \\pmb{v}_{\\theta}(\\pmb{x}_t, t) \\) throughout the interval \\( [t_i, t_{i+1}] \\)" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.306, + 0.905, + 0.323 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} _ {i + 1} = \\boldsymbol {x} _ {i} + \\left(t _ {i + 1} - t _ {i}\\right) \\boldsymbol {v} _ {\\theta} \\left(\\boldsymbol {x} _ {i}, t _ {i}\\right). \\tag {38}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.334, + 0.906, + 0.395 + ], + "angle": 0, + "content": "The most classic multi-step solver Adams-Bashforth method (deemed as Adams for brevity) incorporates the Lagrange polynomial to improve the estimation accuracy with previous predictions." 
+ }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.406, + 0.809, + 0.449 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {t}, t) = \\sum_ {j = 0} ^ {i} (\\prod_ {k = 0, k \\neq j} ^ {i} \\frac {t - t _ {k}}{t _ {j} - t _ {k}}) \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {j}, t _ {j})\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.543, + 0.452, + 0.903, + 0.495 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} _ {i + 1} \\approx \\boldsymbol {x} _ {i} + \\int_ {t _ {i}} ^ {t _ {i + 1}} \\sum_ {j = 0} ^ {i} (\\prod_ {k = 0, k \\neq j} ^ {i} \\frac {t - t _ {k}}{t _ {j} - t _ {k}}) \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {j}, t _ {j}) d t\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.542, + 0.498, + 0.906, + 0.541 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} _ {i + 1} \\approx \\boldsymbol {x} _ {i} + \\sum_ {j = 0} ^ {i} \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {j}, t _ {j}) \\int_ {t _ {i}} ^ {t _ {i + 1}} \\left(\\prod_ {k = 0, k \\neq j} ^ {i} \\frac {t - t _ {k}}{t _ {j} - t _ {k}}\\right) d t\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.553, + 0.906, + 0.617 + ], + "angle": 0, + "content": "Note that \\(\\int_{t_i}^{t_{i + 1}}\\left(\\prod_{k = 0,k\\neq j}^i\\frac{t - t_k}{t_j - t_k}\\right)dt\\) of the Lagrange polynomial can be pre-integrated into a constant coefficient, resulting in only naive summation being required for ODE solving." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.63, + 0.741, + 0.647 + ], + "angle": 0, + "content": "F. Classifier free guidance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.655, + 0.907, + 0.838 + ], + "angle": 0, + "content": "As classifier-free guidance significantly impacts the performance of diffusion models. Traditional classifier-free guidance improves performance at the cost of decreased diversity. Interval guidance is recently been adopted by REPA[52] and Causalfusion[9]. It applies classifier-free guidance only to the high-frequency generation phase to preserve the diversity. We sweep different classifier-free guidance strength with selected intervals. Our DDT-XL/2 achieves the best performance with interval [0.3, 1] with a classifier-free guidance of 2. Recall that we donate \\( t = 0 \\) as the pure noise timestep while REPA[52] use \\( t = 1 \\), thus this exactly corresponds to the [0, 0.7] interval in REPA[52]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.176, + 0.34, + 0.426, + 0.352 + ], + "angle": 0, + "content": "Classifier-free guidance with intervals" + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.352, + 0.476, + 0.578 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.593, + 0.483, + 0.663 + ], + "angle": 0, + "content": "Figure 9. FID10K of DDT-XL/2 with different Classifier free guidance strength and guidance intervals. We sweep different classifier-free guidance strength with selected intervals. Our DDT-XL/2 achieves the best performance with interval [0.3, 1] with a classifier-free guidance of 2." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "13" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_origin.pdf b/data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..89a918ddc3f2b5b452e83c02664642801cb2df42 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3f2bc4752765cfd254c12a453d01c77f0017658e385dccfaaba093bb5a6cec1 +size 1846574 diff --git a/data/2025/2504_05xxx/2504.05741/full.md b/data/2025/2504_05xxx/2504.05741/full.md new file mode 100644 index 0000000000000000000000000000000000000000..fc80324612765b3851ac5cbb72a1862993c8cb38 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/full.md @@ -0,0 +1,548 @@ +# DDT: Decoupled Diffusion Transformer + +Shuai Wang1 Zhi Tian2 Weilin Huang2 Limin Wang1, * +1Nanjing University 2ByteDance Seed Vision +https://github.com/MCG-NJU/DDT + +![](images/0b0fcf7f95fe250a8fe6bbf44a1d260920fff8418690e8baac77374ffe3abe42.jpg) +(a) Our Decoupled Diffusion Transformer + +![](images/48dcf09eaef0520cfce1c2c76b2f54623920e1af72057912b2c72e38de56a625.jpg) +(b) Conventional Diffusion Transformer + +![](images/fc9e0189f4b580f93b7b09e6dc65626e313ee2035d702444275ea38c70f2bfb6.jpg) +(c) FID compared with Other Diffusion Models +Figure 1. Our decoupled diffusion transformer (DDT-XL/2) achieves a SoTA 1.31 FID under 256 epochs. Our decoupled diffusion transformer models incorporate a condition encoder to extract semantic self-conditions and a velocity decoder to decode velocity. + +# Abstract + +Diffusion transformers have demonstrated remarkable generation quality, albeit requiring longer training iterations and numerous inference steps. In each denoising step, diffusion transformers encode the noisy inputs to extract the lower-frequency semantic component and then decode the higher frequency with identical modules. This scheme creates an inherent optimization dilemma: encoding low-frequency semantics necessitates reducing high-frequency components, creating tension between semantic encoding and high-frequency decoding. To resolve this challenge, we propose a new Decoupled Diffusion Transformer (DDT), with a decoupled design of a dedicated condition encoder for semantic extraction alongside a specialized velocity decoder. Our experiments reveal that a more substantial encoder yields performance improvements as model size increases. For ImageNet $256 \times 256$ , Our DDT-XL/2 achieves a new state-of-the-art performance of 1.31 FID (nearly $4 \times$ faster training convergence compared to previous diffusion transformers). For ImageNet $512 \times 512$ , Our DDT-XL/2 achieves a new state-of-the-art FID of 1.28. Additionally, as a beneficial by-product, our decoupled architecture enhances inference speed by enabling the sharing self + +condition between adjacent denoising steps. To minimize performance degradation, we propose a novel statistical dynamic programming approach to identify optimal sharing strategies. + +# 1. Introduction + +Image generation is a fundamental task in computer vision research, which aims at capturing the inherent data distribution of original image datasets and generating high-quality synthetic images through distribution sampling. 
Diffusion models [19, 21, 29, 30, 41] have recently emerged as highly promising solutions to learn the underlying data distribution in image generation, outperforming the GAN-based models [3, 40] and Auto-Regressive models [5, 43, 51]. + +The diffusion forward process gradually adds Gaussian noise to the pristine data following an SDE forward schedule [19, 21, 41]. The denoising process learns the score estimation from this corruption process. Once the score function is accurately learned, data samples can be synthesized by numerically solving the reverse SDE [21, 29, 30, 41]. + +Diffusion Transformers [32, 36] introduce the transformer architecture into diffusion models to replace the traditionally dominant UNet-based model [2, 10]. Empirical evidence suggests that, given sufficient training iterations, diffusion transformers outperform conventional approaches even without relying on long residual connections [36]. Nevertheless, their slow convergence rate still poses great challenge for developing new models due to the high cost. + +In this paper, we want to tackle the aforementioned major disadvantages from a model design perspective. Classic computer vision algorithms [4, 17, 23] strategically employ encoder-decoder architectures, prioritizing large encoders for rich feature extraction and lightweight decoders for efficient inference, while contemporary diffusion models predominantly rely on conventional decoder-only structures. We systematically investigate the underexplored potential of decoupled encoder-decoder designs in diffusion transformers, by answering the question of can decoupled encoder-decoder transformer unlock the capability of accelerated convergence and enhanced sample quality? + +Through investigation experiments, we conclude that the plain diffusion transformer has an optimization dilemma between abstract structure information extraction and detailed appearance information recovery. Further, the diffusion transformer is limited in extracting semantic representation due to the raw pixel supervision [28, 52, 53]. To address this issue, we propose a new architecture to explicitly decouple low-frequency semantic encoding and high-frequency detailed decoding through a customized encoder-decoder design. We call this encoder-decoder diffusion transformer model as DDT (Decoupled Diffusion Transformer). DDT incorporates a condition encoder to extract semantic self-condition features. The extracted self-condition is fed into a velocity decoder along with the noisy latent to regress the velocity field. To maintain the local consistency of self-condition features of adjacent steps, we employ direct supervision of representation alignment and indirect supervision from the velocity regression loss of the decoder. + +In the ImageNet $256 \times 256$ dataset, using the traditional off-shelf VAE [38], our decoupled diffusion transformer (DDT-XL/2) model achieves the state-of-the-art performance of 1.31 FID with interval guidance under only 256 epochs, approximately $4 \times$ training acceleration compared to REPA [52]. In the ImageNet $512 \times 512$ dataset, our DDT-XL/2 model achieves 1.28 FID within 500K finetuning steps. + +Furthermore, our DDT achieves strong local consistency on its self-condition feature from the encoder. This property can significantly boost the inference speed by sharing the self-condition between adjacent steps. 
We formulate the optimal encoder sharing strategy solving as a classic minimal sum path problem by minimizing the performance drop of sharing self-condition among adjacent steps. We propose + +a statistic dynamic programming approach to find the optimal encoder sharing strategy with negligible second-level time cost. Compared with the naive uniform sharing, our dynamic programming delivers a minimal FID drop. Our contributions are summarized as follows. + +- We propose a new decoupled diffusion transformer model, which consists of a condition encoder and a velocity decoder. +- We propose statistic dynamic programming to find the optimal self-condition sharing strategy to boost inference speed while keeping minimal performance downgradation. +- In the ImageNet $256 \times 256$ dataset, using tradition SDf8d4 VAE, our decoupled diffusion transformer (DDT-XL/2) model achieves the SoTA 1.31 FID with interval guidance under only 256 epochs, approximately $4 \times$ training acceleration compared to REPA [52]. +- In the ImageNet $512 \times 512$ dataset, our DDT-XL/2 model achieves the SoTA 1.28 FID, outperforming all previous methods with a significant margin. + +# 2. Related Work + +Diffusion Transformers. The pioneering work of DiT [36] introduced transformers into diffusion models to replace the traditionally dominant UNet architecture [2, 10]. Empirical evidence demonstrates that given sufficient training iterations, diffusion transformers outperform conventional approaches even without relying on long residual connections. SiT [32] further validated the transformer architecture with linear flow diffusion. Following the simplicity and scalability of the diffusion transformer [32, 36], SD3 [12], Lumina [54], and PixArt [6, 7] introduced the diffusion transformer to more advanced text-to-image areas. Moreover, recently, diffusion transformers have dominated the text-to-video area with substantiated visual and motion quality [1, 20, 24]. Our decoupled diffusion transformer (DDT) presents a new variant within the diffusion transformer family. It achieves faster convergence by decoupling the low-frequency encoding and the high-frequency decoding. + +Fast Diffusion Training. To accelerate the training efficiency of diffusion transformers, recent advances have pursued multi-faceted optimizations. Operator-centric approaches [13, 45, 48, 49] leverage efficient attention mechanisms: linear-attention variants [13, 45, 49] reduced quadratic complexity to speed up training, while sparse-attention architectures [48] prioritized sparsely relevant token interactions. Resampling approaches [12, 16] proposed lognorm sampling [12] or loss reweighting [16] techniques to stabilize training dynamics. 
Representation learning enhancement approaches integrate external inductive biases: + +![](images/90bb434a635dade7122ba293d4cd70c1b69caa74dee31e00757ff64653720253.jpg) + +![](images/4205adae321fff2cd11e1a6112d2e8212e500eb09240ca6578d94e76fa6abbc7.jpg) + +![](images/8dd7b645cf3beb6c96a28f5e0aef4fa66291c343736d43a91f44bcb8296a057a.jpg) + +![](images/98ee0e4b3bd272e2bf413b0c1ce26416c41201ccc216b837c3587047d88462d8.jpg) + +![](images/5b111d9945677c3832d9ac16cd82db57d76f31a097ccf022f7c4a6509822f31f.jpg) + +![](images/240994ca06ad9c15a377423cda559735d73e4fe5a793b86a2070d7f7a5a19be9.jpg) + +![](images/3fcb27ffe48aea1ccedcd25a827cfac28fb0c50ec36b20a02dd8d0867f62749c.jpg) + +![](images/1848d6f6c6f09a80d9580fe7a0472334470d7402a8f0f9ebc75f0e54f77cd2c0.jpg) + +![](images/d525d165270ddca02ea8d8ee5b095dd81e4b95d3eea8b81199d47438d2761268.jpg) + +![](images/8f5b86f4867d6ab8b42c2472d952666b81efbbf0cd189ee083b33702d9a4da62.jpg) + +![](images/a9a9a05c460fbfeb08572fc2b2eba0ff359e304716e9d7dfb2b8424d8103dd3e.jpg) + +![](images/844ef683faa78a606fedc9e3b46065adc7dfdff3ac5e76d82e994d3fa16edd1f.jpg) + +![](images/aaea39edf7b8250bfea02aa21ada7ea2b7adefa9180fb407fa2fe28f0a5ac9d0.jpg) +Figure 2. Selected $256 \times 256$ and $512 \times 512$ resolution samples. Generated from DDT-XL/2 trained on ImageNet $256 \times 256$ resolution and ImageNet $512 \times 512$ resolution with CFG = 4.0. + +![](images/3eb3bb847dbdbe1a2a47a22afa09e43d4aa5684a5ada149fd7f1037369596351.jpg) + +![](images/bfb0f79f9a742e80bcaf710e1e90883e1635dc632020949ed444d444d088c5f7.jpg) + +![](images/f5b1120ec986332b1b040332bca193bfbb048be5c316f804ed609cd977122e83.jpg) + +![](images/1938f29b7db82456a19fe9cf4e7e990304681543c87d5423f264908aa8700184.jpg) + +![](images/99a12143a9ba00a6686a3a5f3c2d8ab84698fe37bec78394975ec8ead2901f90.jpg) + +![](images/0af46efc8109063459d92e3ac952af1d0db4c1f4374abc2379210c2a8158df0c.jpg) + +![](images/9a59fb5c4833508f282db8aecf20b1a86b1c6cb5a7ed19663e45728fc0e7c7f4.jpg) + +![](images/4e983e2fbbacb92b9637d32f798ca1cd2a3d14d6e611445b59a01bd33e0f2f41.jpg) +Figure 3. The reverse-SDE process (generation) of SiT-XL/2 in $x$ space. There is a clear generation process from low frequency to high frequency. Most of the time is spent on generating high-frequency details (from $t = 0.4$ to $t = 1.0$ ). + +REPA [52], RCG [27] and DoD [53] borrowed vision-specific priors into diffusion training, while masked modeling techniques [14, 15] strengthened spatial reasoning by enforcing structured feature completion during denoising. Collectively, these strategies address computational, sampling, and representational bottlenecks. + +# 3. Preliminary Analysis + +Linear-based flow matching [29, 30, 32] represents a specialized family of diffusion models that we focus on as our primary analytical subject due to its simplicity and efficiency. For the convenience of discussion, in certain situations, diffusion and flow-matching will be used interchange- + +![](images/6ff463028d0d43933dd6eb9fe5122b58039284839b9e9f78028cca72c059078a.jpg) +Figure 4. The FID50K metric of SiT-XL/2 for different timeshift values. We employ a 2-nd order Adams-like solver to collect the performance. Allocating more computation at noisy steps significantly improves the performance. + +![](images/f15e35020f438296ec0f20c07856070badcae024af68719216a172947aa89de8.jpg) + +ably. In this framework, $t = 0$ corresponds to the pure noise timestep. + +As illustrated in Fig. 3, diffusion models perform autoregressive refinement on spectral components [11, 37]. 
The diffusion transformer encodes the noisy latent to capture lower-frequency semantics before decoding higher-frequency details. However, this semantics encoding process inevitably attenuates high-frequency information, creating an optimization dilemma. This observation motivates our proposal to decouple the conventional decode-only diffusion transformer into an explicit encoder-decoder architecture. + +Lemma 1. For a linear flow-matching noise scheduler at timestep $t$ , let us denote $K_{\text{freq}}$ as the maximum frequency of the clean data $\mathbf{x}_{\text{data}}$ . The maximum retained frequency + +in the noisy latent satisfies: + +$$ +f _ {\max } (t) > \min \left(\left(\frac {t}{1 - t}\right) ^ {2}, K _ {\text {f r e q}}\right). \tag {1} +$$ + +Lemma 1 is directly borrowed from [11, 37], we place the proof of Lemma 1 in Appendix. According to Lemma 1, as $t$ increases to less noisy timesteps, semantic encoding becomes easier (due to noise reduction) while decoding complexity increases (as residual frequencies grow). Consider the worst-case scenario at denoising step $t$ , the diffusion transformer encodes frequencies up to $f_{max}(t)$ , to progress to step $s$ , it must decode a residual frequency of at least $f_{max}(s) - f_{max}(t)$ . Failure to decode these residual frequencies at step $t$ creates a critical bottleneck for progression to subsequent steps. From this perspective, if allocating more of the calculations to more noisy timesteps can lead to an improvement, it means that diffusion transformers struggle with encoding lower frequency to provide semantics. Otherwise, if allocating more of the calculations to less noisy timesteps can lead to an improvement, it means that flow-matching transformers struggle with decoding higher frequency to provide fine details. + +To figure out the bottom-necks of current diffusion models, we conducted a targeted experiment using SiT-XL/2 with a second-order Adams-like linear multistep solver. As shown in Fig. 4, by varying the time-shift values, we demonstrate that allocating more computation to early timesteps improves final performance compared to uniform scheduling. This reveals that diffusion models face challenges in more noisy steps. This leads to a key conclusion: Current diffusion transformers are fundamentally constrained by their low-frequency semantic encoding capacity. This insight motivates the exploration of encoder-decoder architectures with strategic encoder parameter allocation. + +Prior researches further support this perspective. While lightweight diffusion MLP heads demonstrate limited decoding capacity, MAR [28] overcomes this limitation through semantic latents produced by its masked backbones, enabling high-quality image generation. Similarly, REPA [52] enhances low-frequency encoding through alignment with pre-trained vision foundations [35]. + +# 4. Method + +Our decoupled diffusion transformer architecture comprises a condition encoder and a velocity decoder. The condition encoder extracted the low-frequency component from noisy input, class label, and timestep to serve as a self-condition for the velocity decoder; the velocity decoder processed the noisy latent with the self-condition to regress the high-frequency velocity. We train this model using the established linear flow diffusion framework. For brevity, + +we designate our model as DDT (Decoupled Diffusion Transformer). + +# 4.1. 
Condition Encoder + +The condition encoder mirrors the architectural design and input structure of DiT/SiT with improved micro-design. It is built with interleaved Attention and FFN blocks, without long residual connections. The encoder processes three inputs, the noisy latent $\boldsymbol{x}_t$ , timestep $t$ , and class label $y$ to extract the self-condition feature $\boldsymbol{z}_t$ through a series of stacked Attention and FFN blocks: + +$$ +\boldsymbol {z} _ {t} = \operatorname {E n c o d e r} \left(\boldsymbol {x} _ {t}, t, y\right). \tag {2} +$$ + +Specifically, the noisy latent $\boldsymbol{x}_t$ are patched into continuous tokens and then fed to extract the self-condition $\boldsymbol{z}_t$ with aforementioned encoder blocks. The timestep $t$ and class label $y$ serve as external-conditioning information projected into embedding. These external-condition embeddings are progressively injected into the encoded features of $\boldsymbol{x}_t$ using AdaLN-Zero[36] within each encoder block. + +To maintain local consistency of $z_{t}$ across adjacent timesteps, we adopt the representation alignment technique from REPA [52]. Shown in Eq. (3), this method aligns the intermediate feature $\mathbf{h}_i$ from the $i$ -th layer in the self-mapping encoder with the DINOV2 representation $r_*$ . Consistent to REPA [52], the $h_{\phi}$ is the learnable projection MLP: + +$$ +\mathcal {L} _ {e n c} = 1 - \cos \left(r _ {*}, h _ {\phi} \left(\mathbf {h} _ {\mathbf {i}}\right)\right). \tag {3} +$$ + +This simple regularization accelerates training convergence, as shown in REPA [52], and facilitates local consistency of $\boldsymbol{z}_t$ between adjacent steps. It allows sharing the self-condition $\boldsymbol{z}_t$ produced by the encoder between adjacent steps. Our experiments demonstrate that this encoder-sharing strategy significantly enhances inference efficiency with only negligible performance degradation. + +Additionally, the encoder also receives indirect supervision from the decoder, which we elaborate on later. + +# 4.2. Velocity Decoder + +The velocity decoder adopts the same architectural design as the condition encoder and consists of several stacked interleaved Attention and FFN blocks, akin to DiT/SiT. It takes the noisy latent $\boldsymbol{x}_t$ , timestep $t$ , and self-conditioning $\boldsymbol{z}_t$ as inputs to estimate the velocity $\boldsymbol{v}_t$ . Unlike the encoder, we assume that class label information is already embedded within $\boldsymbol{z}_t$ . Thus, only the external-condition timestep $t$ and self-condition feature $\boldsymbol{z}_t$ are used as condition inputs for the decoder blocks: + +$$ +\boldsymbol {v} _ {t} = \mathbf {D e c o d e r} \left(\boldsymbol {x} _ {t}, t, \boldsymbol {z} _ {t}\right). \tag {4} +$$ + +As demonstrated previously, to further improve consistency of self-condition $z_{t}$ between adjacent steps, we employ + +AdaLN-Zero [36] to inject $\mathbf{z}_t$ into the decoder feature. The decoder is trained with the flow matching loss as shown in Eq. (5): + +$$ +\mathcal {L} _ {d e c} = \mathbb {E} \left[ \int_ {0} ^ {1} \left| \left(\boldsymbol {x} _ {d a t a} - \epsilon\right) - \boldsymbol {v} _ {t} \right| ^ {2} \mathrm {d} t \right]. \tag {5} +$$ + +# 4.3. Sampling acceleration + +By incorporating explicit representation alignment into the encoder and implicit self-conditioning injection into the decoder, we achieve local consistency of $z_{t}$ across adjacent steps during training (shown in Fig. 5). 
This enables us to share $z_{t}$ within a suitable local range, reducing the computational burden of the condition encoder.

Formally, given $N$ total inference steps and an encoder computation budget of $K$ (so the sharing ratio is $1 - \frac{K}{N}$), we define $\Phi$ with $|\Phi| = K$ as the set of timesteps where the self-condition is recalculated, as shown in Eq. (6). If the current timestep $t$ is not in $\Phi$, we reuse the previously computed $z_{t - \Delta t}$ as $z_t$. Otherwise, we recompute $z_t$ using the encoder and the current noisy latent $x_t$:

$$
\boldsymbol{z}_{t} = \left\{ \begin{array}{ll} \boldsymbol{z}_{t - \Delta t}, & \text{if } t \notin \Phi \\ \mathbf{Encoder}\left(\boldsymbol{x}_{t}, t, y\right), & \text{if } t \in \Phi \end{array} \right. \tag{6}
$$

Uniform Encoder Sharing. This naive approach recalculates the self-condition $z_{t}$ every $\frac{N}{K}$ steps. Previous work, such as DeepCache [33], uses this handcrafted uniform $\Phi$ set to accelerate UNet models. However, UNet models, trained solely with a denoising loss and lacking robust representation alignment, exhibit less regularized local consistency in deeper features across adjacent steps than our DDT model. Below, we propose a simple and elegant statistic dynamic programming algorithm that constructs the optimal $\Phi$ set, in contrast to the naive approach [33].

Statistic Dynamic Programming. We construct the statistic similarity matrix $\mathbf{S} \in \mathbb{R}^{N \times N}$ of $z_{t}$ among different steps using cosine similarity. The optimal $\Phi$ set guarantees that the total similarity cost $-\sum_{k}^{K} \sum_{i = \Phi_{k}}^{\Phi_{k + 1}} S[\Phi_{k}, i]$ achieves the global minimum. This is a well-formed classic minimal sum path problem, which can be solved by dynamic programming. As shown in Eqs. (7) and (8), we denote $\mathbf{C}_{i}^{k}$ as the cost and $\mathbf{P}_{i}^{k}$ as the traced path when $\Phi_{k} = i$. The state transition function from $\mathbf{C}_{j}^{k - 1}$ to $\mathbf{C}_{i}^{k}$ follows:

$$
\mathbf{C}_{i}^{k} = \min_{j = 0}^{i} \left\{ \mathbf{C}_{j}^{k - 1} - \Sigma_{l = j}^{i} \mathbf{S}[j, l] \right\}. \tag{7}
$$

$$
\mathbf{P}_{i}^{k} = \operatorname{argmin}_{j = 0}^{i} \left\{ \mathbf{C}_{j}^{k - 1} - \Sigma_{l = j}^{i} \mathbf{S}[j, l] \right\}. \tag{8}
$$

After obtaining the cost matrix $\mathbf{C}$ and the traced path $\mathbf{P}$, the optimal $\Phi$ can be recovered by backtracking $\mathbf{P}$ from $\mathbf{P}_N^K$.

# 5. Experiment

We conduct experiments on the ImageNet $256 \times 256$ dataset. The total training batch size is set to 256. Consistent with SiT [32], DiT [36], and REPA [52], we employ the Adam optimizer with a constant learning rate of 0.0001 throughout the entire training process. To ensure a fair comparison, we do not use gradient clipping or learning rate warm-up. Our default training infrastructure consists of $16 \times$ or $8 \times$ A100 GPUs. For sampling, we take the Euler solver with 250 steps as the default choice. As for the VAE, we take the off-the-shelf VAE-ft-EMA with a downsample factor of 8 from Huggingface. We report FID [18], sFID [34], IS [39], Precision, and Recall [25].

# 5.1. Improved baselines
Recent architectural improvements such as SwiGLU [46, 47], RoPE [42], and RMSNorm [46, 47] have been extensively validated by the research community [8, 31, 50]. Additionally, lognorm sampling [12] has demonstrated significant benefits for training convergence (a sketch is given below). Consequently, we developed improved baseline models by incorporating these advanced techniques, drawing inspiration from recent works in the field. The performance of these improved baselines is comprehensively reported in Tab. 2. To validate the reliability of our implementation, we also reproduced the results for REPA-B/2, achieving metrics that marginally exceed those originally reported in REPA [52]. These reproduction results provide additional confidence in the robustness of our approach.

The improved baselines in Tab. 2 consistently outperform their predecessors without REPA. However, upon implementing REPA, performance rapidly approaches a saturation point. This is particularly evident at the XL model size, where incremental technique improvements yield diminishingly small gains.

# 5.2. Metric comparison with baselines

We present the performance of different-size models at 400K training steps in Tab. 2. Our decoupled diffusion transformer (DDT) family demonstrates consistent and significant improvements across model sizes. Our DDT-B/2 (8En4De) model exceeds Improved-REPA-B/2 by 2.8 FID. Our DDT-XL/2 (22En6De) exceeds REPA-XL/2 by 1.3 FID. While decoder-only diffusion transformers approach performance saturation with REPA [52], our DDT models continue to deliver superior results. The incremental technique improvements show diminishing gains, particularly at larger model sizes. However, our DDT models maintain a significant performance advantage, underscoring the effectiveness of our approach.
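As a reference point for the lognorm sampling mentioned above, here is a minimal sketch of logit-normal timestep sampling in the style of [12]; the function name and the default location/scale are illustrative assumptions, not the exact values used in our training runs.

```python
import torch

def sample_timesteps(batch_size: int, loc: float = 0.0, scale: float = 1.0) -> torch.Tensor:
    """Logit-normal ("lognorm") timestep sampling: squash a Gaussian draw
    through a sigmoid so that t in (0, 1) concentrates at intermediate
    noise levels instead of being uniform."""
    u = torch.randn(batch_size) * scale + loc
    return torch.sigmoid(u)
```

Compared to uniform $t \sim \mathcal{U}(0, 1)$, this is intended to put more training signal on mid-range timesteps.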
| Model | Params | Epochs | FID↓ (w/o CFG) | IS↑ (w/o CFG) | Pre.↑ (w/o CFG) | Rec.↑ (w/o CFG) | FID↓ (w/ CFG) | IS↑ (w/ CFG) | Pre.↑ (w/ CFG) | Rec.↑ (w/ CFG) |
|---|---|---|---|---|---|---|---|---|---|---|
| MAR-B [28] | 208M | 800 | 3.48 | 192.4 | 0.78 | 0.58 | 2.31 | 281.7 | 0.82 | 0.57 |
| CausalFusion [9] | 368M | 800 | 5.12 | 166.1 | 0.73 | 0.66 | 1.94 | 264.4 | 0.82 | 0.59 |
| LDM-4 [38] | 400M | 170 | 10.56 | 103.5 | 0.71 | 0.62 | 3.6 | 247.7 | 0.87 | 0.48 |
| DDT-L (Ours) | 458M | 80 | 7.98 | 128.1 | 0.68 | 0.67 | 1.64 | 310.5 | 0.81 | 0.61 |
| MAR-L [28] | 479M | 800 | 2.6 | 221.4 | 0.79 | 0.60 | 1.78 | 296.0 | 0.81 | 0.60 |
| VAVAE [50] | 675M | 800 | 2.17 | 205.6 | 0.77 | 0.65 | 1.35 | 295.3 | 0.79 | 0.65 |
| CausalFusion [9] | 676M | 800 | 3.61 | 180.9 | 0.75 | 0.66 | 1.77 | 282.3 | 0.82 | 0.61 |
| ADM [10] | 554M | 400 | 10.94 | - | 0.69 | 0.63 | 4.59 | 186.7 | 0.82 | 0.52 |
| DiT-XL [36] | 675M | 1400 | 9.62 | 121.5 | 0.67 | 0.67 | 2.27 | 278.2 | 0.83 | 0.57 |
| SiT-XL [32] | 675M | 1400 | 8.3 | - | - | - | 2.06 | 270.3 | 0.82 | 0.59 |
| ViT-XL [16] | 451M | 400 | 8.10 | - | - | - | 2.06 | - | - | - |
| U-ViT-H/2 [2] | 501M | 400 | 6.58 | - | - | - | 2.29 | 263.9 | 0.82 | 0.57 |
| MaskDiT [14] | 675M | 1600 | 5.69 | 178.0 | 0.74 | 0.60 | 2.28 | 276.6 | 0.80 | 0.61 |
| FlowDCN [48] | 618M | 400 | 8.36 | 122.5 | 0.69 | 0.65 | 2.00 | 263.1 | 0.82 | 0.58 |
| RDM [44] | 553M | / | 5.27 | 153.4 | 0.75 | 0.62 | 1.99 | 260.4 | 0.81 | 0.58 |
| REPA [52] | 675M | 800 | 5.9 | 157.8 | 0.70 | 0.69 | 1.42 | 305.7 | 0.80 | 0.64 |
| DDT-XL (Ours) | 675M | 80 | 6.62 | 135.2 | 0.69 | 0.67 | 1.52 | 263.7 | 0.78 | 0.63 |
| DDT-XL (Ours) | 675M | 256 | 6.30 | 146.7 | 0.68 | 0.68 | 1.31 | 308.1 | 0.78 | 0.62 |
| DDT-XL (Ours) | 675M | 400 | 6.27 | 154.7 | 0.68 | 0.69 | 1.26 | 310.6 | 0.79 | 0.65 |
Table 1. System performance comparison on ImageNet $256 \times 256$ class-conditioned generation. Gray blocks mean the algorithm uses a VAE trained or fine-tuned on ImageNet instead of the off-the-shelf SD-VAE-f8d4-ft-ema.
| Model | FID↓ | sFID↓ | IS↑ | Prec.↑ | Rec.↑ |
|---|---|---|---|---|---|
| SiT-B/2 [32] | 33.0 | 6.46 | 43.7 | 0.53 | 0.63 |
| REPA-B/2 [52] | 24.4 | 6.40 | 59.9 | 0.59 | 0.65 |
| REPA-B/2 (Reproduced) | 22.2 | 7.50 | 69.1 | 0.59 | 0.65 |
| DDT-B/2† (8En4De) | 21.1 | 7.81 | 73.0 | 0.60 | 0.65 |
| Improved-SiT-B/2 | 25.1 | 6.54 | 58.8 | 0.57 | 0.64 |
| Improved-REPA-B/2 | 19.1 | 6.88 | 76.49 | 0.60 | 0.66 |
| DDT-B/2 (8En4De) | 16.32 | 6.63 | 86.0 | 0.62 | 0.66 |
| SiT-L/2 [32] | 18.8 | 5.29 | 72.0 | 0.64 | 0.64 |
| REPA-L/2 [52] | 10.0 | 5.20 | 109.2 | 0.69 | 0.65 |
| Improved-SiT-L/2 | 12.7 | 5.48 | 95.7 | 0.65 | 0.65 |
| Improved-REPA-L/2 | 9.3 | 5.44 | 116.6 | 0.67 | 0.66 |
| DDT-L/2 (20En4De) | 7.98 | 5.50 | 128.1 | 0.68 | 0.67 |
| SiT-XL/2 [32] | 17.2 | 5.07 | 76.52 | 0.65 | 0.63 |
| REPA-XL/2 [52] | 7.9 | 5.06 | 122.6 | 0.70 | 0.65 |
| Improved-SiT-XL/2 | 10.9 | 5.3 | 103.4 | 0.66 | 0.65 |
| Improved-REPA-XL/2 | 8.14 | 5.34 | 124.9 | 0.68 | 0.67 |
| DDT-XL/2 (22En6De) | 6.62 | 4.86 | 135.1 | 0.69 | 0.67 |
Table 2. Metrics at $400K$ training steps for different model sizes. All results are reported without classifier-free guidance. Gray means the metric is copied from the original paper; otherwise it is produced by our codebase. By default, our DDT models are built on the improved baselines. $\mathrm{DDT}^{\dagger}$ denotes a model built on the naive baseline without the architecture improvements and lognorm sampling, consistent with REPA. Our DDT models consistently outperform their counterparts.

# 5.3. System level comparison

ImageNet $256 \times 256$. We report the final metrics of DDT-XL/2 (22En6De) and DDT-L/2 (20En4De) in Tab. 1. Our DDT models demonstrate exceptional efficiency, converging in approximately $\frac{1}{4}$ of the total epochs required by REPA [52] and other diffusion transformer models. To maintain methodological consistency with REPA, we employed classifier-free guidance with a scale of 2.0 in the interval [0.3, 1]. Our models delivered impressive results: DDT-L/2 achieved 1.64 FID and DDT-XL/2 achieved 1.52 FID within just 80 epochs. By extending training to 256 epochs, still significantly more efficient than traditional 800-epoch schedules, our DDT-XL/2 established a new state-of-the-art benchmark of 1.31 FID on ImageNet $256 \times 256$, decisively outperforming previous diffusion transformer methodologies. Extending training further to 400 epochs, our DDT-XL/2 (22En6De) achieves 1.26 FID, nearly reaching the upper limit of SD-VAE-ft-EMA-f8d4, which has a 1.20 rFID on ImageNet 256.

ImageNet $512 \times 512$. We provide the final metrics of DDT-XL/2 in Tab. 3. To validate the superiority of our DDT model, we take our DDT-XL/2 trained on ImageNet $256 \times 256$ for 256 epochs as the initialization and fine-tune it on ImageNet $512 \times 512$ for $100K$ steps. Adopting the aforementioned interval guidance [26], we achieved a remarkable state-of-the-art performance of 1.90 FID, decisively outperforming REPA by a significant margin of 0.28 FID.
ImageNet 512 × 512

| Model | FID↓ | sFID↓ | IS↑ | Pre.↑ | Rec.↑ |
|---|---|---|---|---|---|
| BigGAN-deep [3] | 8.43 | 8.13 | 177.90 | 0.88 | 0.29 |
| StyleGAN-XL [40] | 2.41 | 4.06 | 267.75 | 0.77 | 0.52 |
| ADM-G [10] | 7.72 | 6.57 | 172.71 | 0.87 | 0.42 |
| ADM-G, ADM-U | 3.85 | 5.86 | 221.72 | 0.84 | 0.53 |
| DiT-XL/2 [36] | 3.04 | 5.02 | 240.82 | 0.84 | 0.54 |
| SiT-XL/2 [32] | 2.62 | 4.18 | 252.21 | 0.84 | 0.57 |
| REPA-XL/2 [52] | 2.08 | 4.19 | 274.6 | 0.83 | 0.58 |
| FlowDCN-XL/2 [48] | 2.44 | 4.53 | 252.8 | 0.84 | 0.54 |
| DDT-XL/2 (500K) | 1.28 | 4.22 | 305.1 | 0.80 | 0.63 |
In Tab. 3, some metrics exhibit subtle degradation; we attribute this to potentially insufficient fine-tuning. When allocating more training iterations to DDT-XL/2, it achieves 1.28 FID at 500K steps with a CFG of 3.0 within the time interval [0.3, 1.0].

# 5.4. Acceleration by Encoder sharing

As illustrated in Fig. 5, the self-condition of our condition encoder exhibits strong local consistency. Even $\boldsymbol{z}_{t=0}$ has a similarity above 0.8 with $\boldsymbol{z}_{t=1}$. This consistency provides an opportunity to speed up inference by sharing the encoder output between adjacent steps.

We employed both the simple uniform encoder sharing strategy and the novel statistic dynamic programming strategy. Specifically, for the uniform strategy, we only recalculate the self-condition $z_{t}$ every $K$ steps. For statistic dynamic programming, we solve the aforementioned minimal sum path problem on the similarity matrix and recalculate $z_{t}$ according to the solved strategy (a runnable sketch is given below). As shown in Fig. 6, there is a significant inference speedup with nearly no visual quality loss when $K$ is smaller than 6. As shown in Tab. 4, the loss in metrics is still marginal, while the inference speedup is significant. Statistic dynamic programming slightly outperforms the naive uniform strategy with a smaller FID drop.

# 5.5. Ablations

We conduct ablation studies on ImageNet $256 \times 256$ with DDT-B/2 and DDT-L/2. For sampling, we take the Euler solver with 250 steps as the default choice without classifier-free guidance. For training, we train each model for 80 epochs (400K steps) with a batch size of 256.

Encoder-Decoder Ratio. We systematically explored ratios ranging from $2:1$ to $5:1$ across different model sizes, as shown in Fig. 7 and Fig. 8.

Table 3. Benchmarking class-conditional image generation on ImageNet $512 \times 512$. Our DDT-XL/2 ($512 \times 512$) is fine-tuned from the same model trained under the $256 \times 256$ resolution setting for 1.28M steps. We adopt interval guidance with interval [0.3, 1] and a CFG of 3.0.
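To make the sharing-plan search concrete, here is a minimal NumPy sketch of the statistic dynamic programming described in Sec. 4.3 (Eqs. (7) and (8)), assuming a precomputed $N \times N$ similarity matrix `S` of self-condition features; the function name and the segment-cost bookkeeping are our own illustrative choices, not code from the released repository.

```python
import numpy as np

def optimal_sharing_plan(S: np.ndarray, K: int) -> list:
    """Pick the K timesteps where the encoder is re-run (the set Phi).

    S[j, l] is the cosine similarity between the self-condition at step j
    and step l; reusing z_j for steps j..i costs -sum_{l=j..i} S[j, l],
    so minimizing the total cost maximizes the retained similarity.
    """
    N = S.shape[0]
    INF = float("inf")
    # seg[j, i]: cost of one segment that recomputes at step j and reuses up to step i
    seg = np.full((N, N), INF)
    for j in range(N):
        acc = 0.0
        for i in range(j, N):
            acc += S[j, i]
            seg[j, i] = -acc
    # C[k, i]: best cost covering steps 0..i with k recomputations (Eq. (7));
    # P[k, i]: start step of the k-th segment on the optimal path (Eq. (8))
    C = np.full((K + 1, N), INF)
    P = np.zeros((K + 1, N), dtype=int)
    C[1] = seg[0]                            # the first segment must start at step 0
    for k in range(2, K + 1):
        for i in range(k - 1, N):
            for j in range(k - 1, i + 1):    # k-th segment spans steps j..i
                cand = C[k - 1, j - 1] + seg[j, i]
                if cand < C[k, i]:
                    C[k, i], P[k, i] = cand, j
    # Backtrack P from the last step to recover Phi
    phi, i = [0], N - 1
    for k in range(K, 1, -1):
        j = int(P[k, i])
        phi.append(j)
        i = j - 1
    return sorted(phi)
```

During sampling, `phi = optimal_sharing_plan(S, K)` then instantiates the recomputation schedule of Eq. (6): the encoder runs only when the current step index is in `phi`, and $z_t$ is reused otherwise.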
| Share Ratio | Accel. | Φ strategy | FID↓ | sFID↓ | IS↑ | Prec.↑ | Rec.↑ |
|---|---|---|---|---|---|---|---|
| 0.00 | 1.0× | Uniform | 1.31 | 4.62 | 308.1 | 0.78 | 0.66 |
| 0.50 | 1.6× | Uniform | 1.31 | 4.48 | 300.5 | 0.78 | 0.65 |
| 0.66 | 1.9× | Uniform | 1.32 | 4.46 | 301.2 | 0.78 | 0.65 |
| 0.75 | 2.3× | Uniform | 1.34 | 4.43 | 302.7 | 0.78 | 0.65 |
| 0.80 | 2.6× | Uniform | 1.36 | 4.40 | 303.3 | 0.78 | 0.64 |
| 0.80 | 2.6× | StatisticDP | 1.33 | 4.37 | 301.7 | 0.78 | 0.64 |
| 0.83 | 2.7× | Uniform | 1.37 | 4.41 | 302.8 | 0.78 | 0.64 |
| 0.83 | 2.7× | StatisticDP | 1.36 | 4.35 | 300.3 | 0.78 | 0.64 |
| 0.87 | 3.0× | Uniform | 1.42 | 4.43 | 302.8 | 0.78 | 0.64 |
| 0.87 | 3.0× | StatisticDP | 1.40 | 4.35 | 302.4 | 0.78 | 0.64 |
Table 4. Metrics of DDT-XL/2 under different self-condition sharing ratios and sharing strategies, together with the resulting inference acceleration. StatisticDP consistently yields a smaller FID drop than uniform sharing at the same ratio.

![](images/033d63cd85c9681fb31fefd5873fed342d5bf14138f45e31589702949bc1243.jpg)
Figure 5. The cosine similarity of the self-condition feature $z_{t}$ from the encoder between different timesteps. There is a strong correlation between adjacent steps, indicating redundancy.

![](images/ee4c2612ac81285275068e95259e898b96d23d847186f0bc23ef8c53d733f70c.jpg)
Figure 6. Sharing the self-condition $z_{t}$ between adjacent steps significantly speeds up inference. We tried various sharing frequency configurations. There is only marginal visual quality degradation when the sharing frequency is reasonable.

Our notation $m\mathrm{En}n\mathrm{De}$ represents models with $m$ encoder layers and $n$ decoder layers.

![](images/407f5b5ff8f50c1416dcfbeafdd2fea4269a384cafb24e536a03e1635a638c3b.jpg)

![](images/2757235a519a66a5a620a7b238d4836051bbd2024c54007acd28fc92d3a5a40d.jpg)

![](images/4b8104cb4cd6afc445f9075381ba25c807552c05b2bdf48c9ed38b7d6ca8600f.jpg)

![](images/444038e1ba194d39dcc047006d8249c0659c38455fcbbb140ca296cf75f66b58.jpg)
Figure 7. DDT-B/2 built upon the improved baselines under various encoder-decoder layer ratios. DDT-B/2 (8En4De) achieves much faster convergence and better performance.
Figure 8. DDT-L/2 built upon the improved baselines under various encoder-decoder layer ratios. DDT-L/2 prefers an unexpectedly aggressive encoder-decoder ratio: DDT-L/2 (20En4De) achieves much faster convergence and better performance.

![](images/ead4974e709c35db97c6cbfb7ffa7b61e5d9ea3ee2de853402c30d478dacb855.jpg)

![](images/0107bf7a532ff9c9a3c73a46031f8f1ad26badac4b3d1c14d2d9de1f95c1568a.jpg)

The investigation experiments in Fig. 7 and Fig. 8 revealed critical insights into architectural optimization. We observed that a larger encoder becomes increasingly beneficial as the model size grows. For the Base model in Fig. 7, the optimal configuration emerged as 8 encoder layers and 4 decoder layers, delivering superior performance and convergence speed. Notably, the Large model in Fig. 8 exhibited a distinct preference, achieving peak performance with 20 encoder layers and 4 decoder layers, an unexpectedly aggressive encoder-decoder ratio. This discovery motivates us to scale the layer ratio in DDT-XL/2 to 22 encoder layers and 6 decoder layers to explore the performance upper limits of diffusion transformers.

Decoder Block types. In our investigation of decoder block types and their impact on high-frequency decoding performance, we systematically evaluated multiple architectural configurations, including alternatives such as simple $3 \times 3$ convolution blocks and naive MLP blocks. As shown in Tab. 5, the default setting (Attention with MLP) achieves the best results. Thanks to the encoder-decoder design, even naive Conv blocks achieve comparable results.

# 6. Conclusion

In this paper, we have introduced a novel Decoupled Diffusion Transformer, which rethinks the optimization dilemma
| Decoder Block | FID↓ | sFID↓ | IS↑ | Prec.↑ | Rec.↑ |
|---|---|---|---|---|---|
| Conv+MLP | 16.96 | 7.33 | 85.1 | 0.62 | 0.65 |
| MLP+MLP | 24.13 | 7.89 | 65.0 | 0.57 | 0.65 |
| Attn+MLP | 16.32 | 6.63 | 86.0 | 0.62 | 0.66 |
+ +Table 5. Metrics of $400K$ training steps on DDT-B/2(8En4De) with different decoder blocks. All results are reported without classifier-free guidance. The Default Attention + MLP configuration achieves best performance. + +of the traditional diffusion transformer. By decoupling the low-frequency encoding and high-frequency decoding into dedicated components, we effectively resolved the optimization dilemma that has constrained diffusion transformer. Furthermore, we discovered that increasing the encoder capacity relative to the decoder yields increasingly beneficial results as the overall model scale grows. This insight provides valuable guidance for future model scaling efforts. Our experiments demonstrate that our DDT-XL/2 (22En6De) with an unexpected aggressive encoder-decoder layer ratio achieves great performance while requiring only 256 training epochs. This significant improvement in efficiency addresses one of the primary limitations of diffusion models: their lengthy training requirements. The decoupled architecture also presents opportunities for inference optimization through our proposed encoder result sharing mechanism. Our statistical dynamic programming approach for determining optimal sharing strategies enables faster inference while minimizing quality + +degradation, demonstrating that architectural innovations can yield benefits beyond their primary design objectives. + +# References + +[1] Niket Agarwal, Arslan Ali, Maciej Bala, Yogesh Balaji, Erik Barker, Tiffany Cai, Prithvijit Chattopadhyay, Yongxin Chen, Yin Cui, Yifan Ding, et al. Cosmos world foundation model platform for physical ai. arXiv preprint arXiv:2501.03575, 2025. 2 +[2] Fan Bao, Shen Nie, Kaiwen Xue, Yue Cao, Chongxuan Li, Hang Su, and Jun Zhu. All are worth words: A vit backbone for diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22669-22679, 2023. 2, 6 +[3] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale gan training for high fidelity natural image synthesis. arXiv preprint arXiv:1809.11096, 2018. 1, 7 +[4] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2 +[5] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 1 +[6] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart- $\cdot$ alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 2 +[7] Junsong Chen, Chongjian Ge, Enze Xie, Yue Wu, Lewei Yao, Xiaozhe Ren, Zhongdao Wang, Ping Luo, Huchuan Lu, and Zhenguo Li. Pixart- $\backslash$ sigma: Weak-to-strong training of diffusion transformer for 4k text-to-image generation. arXiv preprint arXiv:2403.04692, 2024. 2 +[8] Xiangxiang Chu, Jianlin Su, Bo Zhang, and Chunhua Shen. Visionllama: A unified llama interface for vision tasks. arXiv preprint arXiv:2403.00522, 2024. 5 +[9] Chaorui Deng, Deyao Zh, Kunchang Li, Shi Guan, and Haoqi Fan. Causal diffusion transformers for generative modeling. arXiv preprint arXiv:2412.12095, 2024. 6, 12 +[10] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. 
Advances in neural information processing systems, 34:8780-8794, 2021. 2, 6, 7, 11 +[11] Sander Dieleman. Diffusion is spectral autoregression, 2024. 3, 4 +[12] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. arXiv preprint arXiv:2403.03206, 2024. 2, 5 +[13] Zhengcong Fei, Mingyuan Fan, Changqian Yu, Debang Li, and Junshi Huang. Diffusion-rwkv: Scaling rwkv-like architectures for diffusion models. arXiv preprint arXiv:2404.04478, 2024. 2 + +[14] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 23164-23173, 2023. 3, 6 +[15] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23164-23173, 2023. 3 +[16] Tiankai Hang, Shuyang Gu, Chen Li, Jianmin Bao, Dong Chen, Han Hu, Xin Geng, and Baining Guo. Efficient diffusion training via min-snr weighting strategy. In Proceedings of the IEEE/CVF international conference on computer vision, pages 7441-7451, 2023. 2, 6 +[17] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 2 +[18] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5 +[19] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 1 +[20] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 2 +[21] Tero Karras, Miika Aittala, Timo Aila, and Samuli Laine. Elucidating the design space of diffusion-based generative models. Advances in Neural Information Processing Systems, 35:26565-26577, 2022. 1 +[22] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 11 +[23] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023. 2 +[24] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 2 +[25] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 5 +[26] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 
6 + +[27] Tianhong Li, Dina Katabi, and Kaiming He. Return of unconditional generation: A self-supervised representation generation method. Advances in Neural Information Processing Systems, 37:125441-125468, 2024. 3 +[28] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. Advances in Neural Information Processing Systems, 37:56424-56445, 2025. 2, 4, 6 +[29] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022. 1, 3 +[30] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 1, 3 +[31] Zeyu Lu, Zidong Wang, Di Huang, Chengyue Wu, Xihui Liu, Wanli Ouyang, and Lei Bai. Fit: Flexible vision transformer for diffusion model. arXiv preprint arXiv:2402.12376, 2024.5 +[32] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. arXiv preprint arXiv:2401.08740, 2024. 2, 3, 5, 6, 7 +[33] Xinyin Ma, Gongfan Fang, and Xinchao Wang. Deepcache: Accelerating diffusion models for free. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15762-15772, 2024. 5 +[34] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter W Battaglia. Generating images with sparse representations. arXiv preprint arXiv:2103.03841, 2021. 5 +[35] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 4 +[36] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 2, 4, 5, 6, 7 +[37] Severi Rissanen, Markus Heinonen, and Arno Solin. Generative modelling with inverse heat dissipation. arXiv preprint arXiv:2206.13397, 2022. 3, 4 +[38] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 6 +[39] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 5 +[40] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 conference proceedings, pages 1-10, 2022. 1, 7 +[41] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based + +generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020. 1 +[42] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 5 +[43] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 
1 +[44] Jiayan Teng, Wendi Zheng, Ming Ding, Wenyi Hong, Jianqiao Wangni, Zhuoyi Yang, and Jie Tang. Relay diffusion: Unifying diffusion process across resolutions for image synthesis. arXiv preprint arXiv:2309.03350, 2023. 6 +[45] Yao Teng, Yue Wu, Han Shi, Xuefei Ning, Guohao Dai, Yu Wang, Zhenguo Li, and Xihui Liu. Dim: Diffusion mamba for efficient high-resolution image synthesis. arXiv preprint arXiv:2405.14224, 2024. 2 +[46] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 5 +[47] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 5 +[48] Shuai Wang, Zexian Li, Tianhui Song, Xubin Li, Tiezheng Ge, Bo Zheng, and Limin Wang. Flowdcn: Exploring dcn-like architectures for fast image generation with arbitrary resolution. arXiv preprint arXiv:2410.22655, 2024. 2, 6, 7 +[49] Jing Nathan Yan, Jiatao Gu, and Alexander M Rush. Diffusion models without attention. arXiv preprint arXiv:2311.18257, 2023. 2 +[50] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 5, 6 +[51] Qihang Yu, Ju He, Xueqing Deng, Xiaohui Shen, and Liang-Chieh Chen. Randomized autoregressive visual generation. arXiv preprint arXiv:2411.00776, 2024. 1 +[52] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 2, 3, 4, 5, 6, 7, 12 +[53] Xiaoyu Yue, Zidong Wang, Zeyu Lu, Shuyang Sun, Meng Wei, Wanli Ouyang, Lei Bai, and Luping Zhou. Diffusion models need visual priors for image generation. arXiv preprint arXiv:2410.08531, 2024. 2, 3 +[54] Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Lirui Zhao, Fu-Yun Wang, Zhanyu Ma, et al. Lumina-last: Making lumina-t2x stronger and faster with next-dit. arXiv preprint arXiv:2406.18583, 2024. 2 + +# A. Model Specs + +
| Config | #Layers | Hidden dim | #Heads |
|---|---|---|---|
| B/2 | 12 | 768 | 12 |
| L/2 | 24 | 1024 | 16 |
| XL/2 | 28 | 1152 | 16 |
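As a quick plausibility check on the parameter counts quoted in Tab. 1, the configurations above can be turned into a rough estimate under the assumption of DiT-style blocks (roughly $4d^2$ for attention, $8d^2$ for the FFN, and $6d^2$ for AdaLN-Zero per layer); the snippet below is purely illustrative and ignores embedders and the final layer.

```python
# Rough transformer parameter estimate, assuming DiT-style blocks:
# attention ~4d^2, FFN ~8d^2, AdaLN-Zero modulation ~6d^2 per layer.
DDT_CONFIGS = {
    "B/2":  {"layers": 12, "hidden_dim": 768},
    "L/2":  {"layers": 24, "hidden_dim": 1024},
    "XL/2": {"layers": 28, "hidden_dim": 1152},
}

for name, cfg in DDT_CONFIGS.items():
    params = 18 * cfg["layers"] * cfg["hidden_dim"] ** 2
    print(f"{name}: ~{params / 1e6:.0f}M parameters")
# B/2: ~127M, L/2: ~453M, XL/2: ~669M, broadly consistent with the
# 458M / 675M figures in Tab. 1 once embedders and the final layer are added.
```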
+ +# B. Hyper-parameters + +
| Config | Value |
|---|---|
| VAE | SD-VAE-f8d4-ft-ema |
| VAE downsample | 8 |
| latent channel | 4 |
| optimizer | AdamW [22] |
| base learning rate | 1e-4 |
| weight decay | 0.0 |
| batch size | 256 |
| learning rate schedule | constant |
| augmentation | center crop |
| diffusion sampler | Euler-ODE |
| diffusion steps | 250 |
| evaluation suite | ADM [10] |
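For concreteness, here is a minimal PyTorch sketch of one training step under the linear flow objective of Eq. (5) with the settings above; `model` (a velocity network taking $(x_t, t, y)$) is assumed given, the REPA alignment term of Eq. (3) is omitted for brevity, and the snippet follows the paper's convention that $t = 0$ is pure noise with $\alpha_t = t$, $\sigma_t = 1 - t$.

```python
import torch

def training_step(model, optimizer, x_data, y):
    """One linear flow-matching step (Eq. (5)); t = 0 is pure noise."""
    b = x_data.shape[0]
    t = torch.rand(b, device=x_data.device)
    eps = torch.randn_like(x_data)
    tb = t.view(b, *([1] * (x_data.dim() - 1)))      # broadcast t over latent dims
    x_t = tb * x_data + (1.0 - tb) * eps             # alpha_t = t, sigma_t = 1 - t
    v_target = x_data - eps                          # Eqs. (13)-(14)
    loss = ((model(x_t, t, y) - v_target) ** 2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.detach()

# Optimizer per the table above: AdamW, constant lr 1e-4, no weight decay.
# optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=0.0)
```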
# C. Linear flow and Diffusion

Given the SDE forward and reverse process:

$$
d \boldsymbol{x}_{t} = f(t) \boldsymbol{x}_{t}\, \mathrm{d}t + g(t)\, \mathrm{d}\boldsymbol{w} \tag{9}
$$

$$
d \boldsymbol{x}_{t} = [ f(t) \boldsymbol{x}_{t} - g(t)^{2} \nabla_{\boldsymbol{x}} \log p(\boldsymbol{x}_{t}) ]\, dt + g(t)\, d\boldsymbol{w} \tag{10}
$$

A corresponding deterministic process exists whose trajectories share the same marginal probability densities as the reverse SDE:

$$
d \boldsymbol{x}_{t} = \left[ f(t) \boldsymbol{x}_{t} - \frac{1}{2} g(t)^{2} \nabla_{\boldsymbol{x}_{t}} \log p(\boldsymbol{x}_{t}) \right] dt \tag{11}
$$

Given $x_{t} = \alpha(t) x_{data} + \sigma(t) \epsilon$, the traditional diffusion model learns:

$$
\nabla_{\boldsymbol{x}_{t}} \log p(\boldsymbol{x}_{t}) = -\frac{\epsilon}{\sigma(t)} \tag{12}
$$

The flow-matching framework actually learns the following:

$$
\boldsymbol{v}_{t} = \dot{\alpha}(t)\, \boldsymbol{x}_{\text{data}} + \dot{\sigma}(t)\, \boldsymbol{\epsilon} \tag{13}
$$

$$
\boldsymbol{v}_{t} = \boldsymbol{x}_{\text{data}} - \boldsymbol{\epsilon} \tag{14}
$$

Here we will demonstrate that in flow matching, the $\boldsymbol{v}_t$ prediction is actually the same as the reverse ODE:

$$
\dot{\alpha}(t)\, \boldsymbol{x}_{\text{data}} + \dot{\sigma}(t)\, \boldsymbol{\epsilon} \tag{15}
$$

$$
= f(t) \boldsymbol{x}_{t} - \frac{1}{2} g(t)^{2} \nabla_{\boldsymbol{x}_{t}} \log p(\boldsymbol{x}_{t}) \tag{16}
$$

Let us start by expanding the reverse ODE first:

$$
f(t) \boldsymbol{x}_{t} - \frac{1}{2} g(t)^{2} \nabla_{\boldsymbol{x}_{t}} \log p(\boldsymbol{x}_{t}) \tag{17}
$$

$$
= f(t) \left( \alpha(t) \boldsymbol{x}_{\text{data}} + \sigma(t) \boldsymbol{\epsilon} \right) - \frac{1}{2} g(t)^{2} \left[ -\frac{\boldsymbol{\epsilon}}{\sigma(t)} \right] \tag{18}
$$

$$
= f(t) \alpha(t) \boldsymbol{x}_{\text{data}} + \left( f(t) \sigma(t) + \frac{1}{2} \frac{g(t)^{2}}{\sigma(t)} \right) \boldsymbol{\epsilon} \tag{19}
$$

To prove Eq. (16), we need to demonstrate that:

$$
\dot{\alpha}(t) = f_{t}\, \alpha(t) \tag{20}
$$

$$
\dot{\sigma}(t) = f_{t}\, \sigma(t) + \frac{1}{2} \frac{g_{t}^{2}}{\sigma(t)}. \tag{21}
$$

Here, let us derive the relation between $f_{t}$ and $\alpha(t)$, $\dot{\alpha}(t)$. We denote $x_{data}(t) = \alpha(t) x_{data}$ as the remaining component of $x_{data}$ in $x_{t}$; it is easy to find that:

$$
d \boldsymbol{x}_{\text{data}}(t) = f_{t}\, \boldsymbol{x}_{\text{data}}(t)\, dt \tag{22}
$$

$$
d \left( \alpha(t) x_{data} \right) = f_{t}\, \alpha(t) x_{data}\, dt \tag{23}
$$

$$
d \alpha(t) = f_{t}\, \alpha(t)\, dt \tag{24}
$$

So, Eq. (20) is right.

Based on the above equation, we will demonstrate the relation of $g_{t}, f_{t}$ with $\sigma(t)$. Note that Gaussian noise has a nice additive property:

$$
a \epsilon_{1} + b \epsilon_{2} \sim \mathcal{N}\left(0, \sqrt{a^{2} + b^{2}}\right) \tag{25}
$$

Let us start with the calculation of the Gaussian noise component $\epsilon(t)$: reaching timestep $t$, every noise increment added at $s \in [0, t]$ has been decayed by a factor of $\frac{\alpha(t)}{\alpha(s)}$.
Thus, the accumulated Gaussian noise has a standard deviation $\sigma(t)$ of:

$$
\sigma(t) = \sqrt{\int_0^t \left(\frac{\alpha(t)}{\alpha(s)}\right)^2 g_s^2\, ds} \tag{26}
$$

$$
\sigma(t) = \alpha(t)\sqrt{\int_0^t \left(\frac{g_s}{\alpha(s)}\right)^2 ds}. \tag{27}
$$

Having related $f_t, g_t$ to $\alpha(t), \sigma(t)$, we can derive $\dot{\alpha}(t)$ and $\dot{\sigma}(t)$. Solving Eq. (24) with $\alpha(0) = 1$ gives $\alpha(t) = \exp\left[\int_0^t f_s\, ds\right]$, hence:

$$
\dot{\alpha}(t) = f_t \exp\left[\int_0^t f_s\, ds\right] \tag{28}
$$

$$
\dot{\alpha}(t) = f_t\,\alpha(t). \tag{29}
$$

As for $\dot{\sigma}(t)$, the computation is more involved but not hard. Differentiating Eq. (27) with the product and chain rules (the inner integral has derivative $(g_t/\alpha(t))^2$):

$$
\dot{\sigma}(t) = \dot{\alpha}(t)\sqrt{\int_0^t \left(\frac{g_s}{\alpha(s)}\right)^2 ds} + \alpha(t)\,\frac{\frac{1}{2}\frac{g_t^2}{\alpha^2(t)}}{\sqrt{\int_0^t \left(\frac{g_s}{\alpha(s)}\right)^2 ds}} \tag{30}
$$

$$
\dot{\sigma}(t) = f_t\,\alpha(t)\sqrt{\int_0^t \left(\frac{g_s}{\alpha(s)}\right)^2 ds} + \alpha(t)\,\frac{\frac{1}{2}\frac{g_t^2}{\alpha^2(t)}}{\sqrt{\int_0^t \left(\frac{g_s}{\alpha(s)}\right)^2 ds}} \tag{31}
$$

$$
\dot{\sigma}(t) = f_t\,\alpha(t)\sqrt{\int_0^t \left(\frac{g_s}{\alpha(s)}\right)^2 ds} + \frac{\frac{1}{2} g_t^2}{\alpha(t)\sqrt{\int_0^t \left(\frac{g_s}{\alpha(s)}\right)^2 ds}} \tag{32}
$$

$$
\dot{\sigma}(t) = f_t\,\sigma(t) + \frac{1}{2}\frac{g_t^2}{\sigma(t)}. \tag{33}
$$

So Eq. (21) holds, which completes the proof of Eq. (16).
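As a quick sanity check on Eqs. (26)-(27), the following simulation (our own illustration, not from the paper; the constants $f \equiv -1$ and $g \equiv 0.5$ are arbitrary) compares the empirical standard deviation of an Euler-Maruyama simulation of Eq. (9) against the closed form of Eq. (27):

```python
import numpy as np

# Simulate dx = f*x dt + g dw with constant f < 0 and g, so that
# alpha(t) = exp(f t) and Eq. (27) integrates in closed form to
# sigma(t)^2 = g^2 * (1 - exp(2 f t)) / (-2 f).
rng = np.random.default_rng(0)
f, g = -1.0, 0.5                      # arbitrary illustrative constants
T, n_steps, n_paths = 1.0, 2000, 200_000
dt = T / n_steps

x = np.ones(n_paths)                  # x_data = 1 for every path
for _ in range(n_steps):              # Euler-Maruyama steps
    x += f * x * dt + g * np.sqrt(dt) * rng.standard_normal(n_paths)

sigma_T = g * np.sqrt((1.0 - np.exp(2.0 * f * T)) / (-2.0 * f))
print(x.mean(), np.exp(f * T))        # the mean decays as alpha(T)
print(x.std(), sigma_T)               # empirical vs. Eq. (27); agree to ~1e-3
```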
# D. Proof of Spectrum Autoregressive

Given the noise scheduler $\{\alpha_t, \sigma_t\}$, the clean data $\boldsymbol{x}_{\mathrm{data}}$, and Gaussian noise $\boldsymbol{\epsilon}$, denote by $K_{\mathrm{freq}}$ the maximum frequency of the clean data $\boldsymbol{x}_{\mathrm{data}}$. The noisy latent $\boldsymbol{x}_t$ at timestep $t$ is defined as:

$$
\boldsymbol{x}_t = \alpha_t\,\boldsymbol{x}_{\mathrm{data}} + \sigma_t\,\boldsymbol{\epsilon}. \tag{34}
$$

The expected spectrum magnitude $c_i$ of $\boldsymbol{x}_t$ on the DCT basis $\boldsymbol{u}_i$ follows:

$$
c_i = \mathbb{E}_{\boldsymbol{\epsilon}}\big[(\boldsymbol{u}_i^{T}\boldsymbol{x}_t)^2\big]
= \mathbb{E}_{\boldsymbol{\epsilon}}\big[(\boldsymbol{u}_i^{T}(\alpha_t\,\boldsymbol{x}_{\mathrm{data}} + \sigma_t\,\boldsymbol{\epsilon}))^2\big].
$$

Recall that the expected spectrum magnitude of Gaussian noise $\boldsymbol{\epsilon}$ is uniform across frequencies; call this constant $\lambda$. Expanding the square and using $\mathbb{E}_{\boldsymbol{\epsilon}}[\boldsymbol{\epsilon}] = 0$ to drop the cross term:

$$
c_i = [\alpha_t\,\boldsymbol{u}_i^{T}\boldsymbol{x}_{\mathrm{data}}]^2 + 2\alpha_t\sigma_t\,\mathbb{E}_{\boldsymbol{\epsilon}}[\boldsymbol{u}_i^{T}\boldsymbol{x}_{\mathrm{data}}\,\boldsymbol{u}_i^{T}\boldsymbol{\epsilon}] + \sigma_t^2\,\mathbb{E}_{\boldsymbol{\epsilon}}[(\boldsymbol{u}_i^{T}\boldsymbol{\epsilon})^2]
$$

$$
c_i = [\alpha_t\,\boldsymbol{u}_i^{T}\boldsymbol{x}_{\mathrm{data}}]^2 + \sigma_t^2\,\mathbb{E}_{\boldsymbol{\epsilon}}[(\boldsymbol{u}_i^{T}\boldsymbol{\epsilon})^2]
$$

$$
c_i = \alpha_t^2\,[\boldsymbol{u}_i^{T}\boldsymbol{x}_{\mathrm{data}}]^2 + \sigma_t^2\,\lambda.
$$

If $\sigma_t^2\lambda$ is larger than $[\alpha_t\,\boldsymbol{u}_i^{T}\boldsymbol{x}_{\mathrm{data}}]^2$, the spectrum magnitude $c_i$ on the DCT basis $\boldsymbol{u}_i$ is drowned out by the noise; thus the maximal remaining frequency $f_{\max}(t)$ of the original data in $\boldsymbol{x}_t$ follows:

$$
f_{\max}(t) > \min\left(\left(\frac{\alpha_t\,\boldsymbol{u}_i^{T}\boldsymbol{x}_{\mathrm{data}}}{\sigma_t\,\lambda}\right)^2,\; K_{\mathrm{freq}}\right). \tag{35}
$$

Although $\left(\frac{\boldsymbol{u}_i^{T}\boldsymbol{x}_{\mathrm{data}}}{\lambda}\right)^2$ depends on the dataset, we simply take it to be the constant 1 and substitute $\alpha_t = t$ and $\sigma_t = 1 - t$ into the equation above:

$$
f_{\max}(t) > \min\left(\left(\frac{t}{1-t}\right)^2,\; K_{\mathrm{freq}}\right). \tag{36}
$$

Hence the surviving frequency band grows monotonically with $t$: generation proceeds from low to high frequencies, i.e., autoregressively in the spectrum.

# E. Linear multistep method

We conduct a targeted experiment on SiT-XL/2 with an Adams-Bashforth-style linear multistep solver. To clarify, we did not employ this solver for our DDT models in any of the tables in the main paper.

The reverse ODE of a diffusion model tackles the following integral:

$$
\boldsymbol{x}_{i+1} = \boldsymbol{x}_i + \int_{t_i}^{t_{i+1}} \boldsymbol{v}_\theta(\boldsymbol{x}_t, t)\, dt \tag{37}
$$

The classic Euler method employs $\boldsymbol{v}_\theta(\boldsymbol{x}_i, t_i)$ as an estimate of $\boldsymbol{v}_\theta(\boldsymbol{x}_t, t)$ throughout the interval $[t_i, t_{i+1}]$:

$$
\boldsymbol{x}_{i+1} = \boldsymbol{x}_i + (t_{i+1} - t_i)\,\boldsymbol{v}_\theta(\boldsymbol{x}_i, t_i). \tag{38}
$$

The classic multistep Adams-Bashforth method (referred to as Adams for brevity) instead interpolates previous predictions with a Lagrange polynomial to improve the estimation accuracy:

$$
\boldsymbol{v}_\theta(\boldsymbol{x}_t, t) \approx \sum_{j=0}^{i} \left(\prod_{k=0, k\neq j}^{i} \frac{t - t_k}{t_j - t_k}\right) \boldsymbol{v}_\theta(\boldsymbol{x}_j, t_j)
$$

$$
\boldsymbol{x}_{i+1} \approx \boldsymbol{x}_i + \int_{t_i}^{t_{i+1}} \sum_{j=0}^{i} \left(\prod_{k=0, k\neq j}^{i} \frac{t - t_k}{t_j - t_k}\right) \boldsymbol{v}_\theta(\boldsymbol{x}_j, t_j)\, dt
$$

$$
\boldsymbol{x}_{i+1} \approx \boldsymbol{x}_i + \sum_{j=0}^{i} \boldsymbol{v}_\theta(\boldsymbol{x}_j, t_j) \int_{t_i}^{t_{i+1}} \left(\prod_{k=0, k\neq j}^{i} \frac{t - t_k}{t_j - t_k}\right) dt
$$

Note that the factor $\int_{t_i}^{t_{i+1}}\left(\prod_{k=0, k\neq j}^{i}\frac{t - t_k}{t_j - t_k}\right) dt$ of each Lagrange basis can be pre-integrated into a constant coefficient, so that solving the ODE only requires a weighted summation of cached predictions, as sketched below.
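A minimal NumPy sketch of this pre-integration follows. It is our own illustration under the stated assumptions; `lagrange_weights`, `adams_step`, and the toy `velocity` field are hypothetical names, not the paper's implementation:

```python
import numpy as np

def lagrange_weights(t_hist, t0, t1):
    """Pre-integrate each Lagrange basis built on the nodes t_hist over
    [t0, t1]: w_j = int_{t0}^{t1} prod_{k != j} (t - t_k)/(t_j - t_k) dt."""
    weights = []
    for j, tj in enumerate(t_hist):
        basis = np.poly1d([1.0])
        for k, tk in enumerate(t_hist):
            if k != j:
                basis = basis * np.poly1d([1.0, -tk]) / (tj - tk)
        antideriv = basis.integ()
        weights.append(antideriv(t1) - antideriv(t0))
    return weights

def adams_step(x, v_hist, t_hist, t_next):
    """One multistep update: x_{i+1} = x_i + sum_j w_j * v(x_j, t_j)."""
    w = lagrange_weights(t_hist, t_hist[-1], t_next)
    return x + sum(wj * vj for wj, vj in zip(w, v_hist))

# Usage sketch: Euler on the first step (history of length 1), then
# second-order Adams afterwards; `velocity` stands in for the learned v_theta.
velocity = lambda x, t: -x            # toy field with known solution exp(-t)
ts = np.linspace(0.0, 1.0, 11)
x, v_hist, t_hist = np.array([1.0]), [], []
for i, t in enumerate(ts[:-1]):
    v_hist = (v_hist + [velocity(x, t)])[-2:]   # keep order <= 2
    t_hist = (t_hist + [t])[-2:]
    x = adams_step(x, v_hist, t_hist, ts[i + 1])
print(x, np.exp(-1.0))                # should agree to ~1e-3
```

With uniform steps and a two-entry history this reduces to the familiar AB2 update $\boldsymbol{x}_{i+1} = \boldsymbol{x}_i + h\left(\tfrac{3}{2}\boldsymbol{v}_i - \tfrac{1}{2}\boldsymbol{v}_{i-1}\right)$; in practice the history is truncated to a small order, since high-order Adams extrapolation can become unstable.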
# F. Classifier-free guidance

Classifier-free guidance significantly impacts the performance of diffusion models: traditional classifier-free guidance improves sample quality at the cost of decreased diversity. Interval guidance has recently been adopted by REPA [52] and CausalFusion [9]; it applies classifier-free guidance only to the high-frequency generation phase to preserve diversity. We sweep different classifier-free guidance strengths over selected intervals. Our DDT-XL/2 achieves the best performance with the interval [0.3, 1] and a classifier-free guidance strength of 2. Recall that we denote $t = 0$ as the pure-noise timestep while REPA [52] uses $t = 1$, so this corresponds exactly to the [0, 0.7] interval in REPA [52].

![](images/d832ac8214ead6291814ec65b36289871ce8a3fe75d47d1a9b200cf3022d9f90.jpg)
Classifier-free guidance with intervals
Figure 9. FID-10K of DDT-XL/2 with different classifier-free guidance strengths and guidance intervals. We sweep different classifier-free guidance strengths over selected intervals; DDT-XL/2 achieves the best performance with the interval [0.3, 1] and a guidance strength of 2.
\ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05741/images/0107bf7a532ff9c9a3c73a46031f8f1ad26badac4b3d1c14d2d9de1f95c1568a.jpg b/data/2025/2504_05xxx/2504.05741/images/0107bf7a532ff9c9a3c73a46031f8f1ad26badac4b3d1c14d2d9de1f95c1568a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2eaea3717f43178c955351bad04cfe8ad47cb8e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/0107bf7a532ff9c9a3c73a46031f8f1ad26badac4b3d1c14d2d9de1f95c1568a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ffb10a862671dcee9eb295ac3f949715cb15f391193c463f07fe199b63c82d0 +size 25991 diff --git a/data/2025/2504_05xxx/2504.05741/images/033d63fcd85c9681fb31fefd5873fed342d5bf14138f45e31589702949bc1243.jpg b/data/2025/2504_05xxx/2504.05741/images/033d63fcd85c9681fb31fefd5873fed342d5bf14138f45e31589702949bc1243.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6bb47773b52231a78f5d2c4fa9f7da9480d4567f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/033d63fcd85c9681fb31fefd5873fed342d5bf14138f45e31589702949bc1243.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93a214d892db66ae111509068a357c65aeb2022b3f17e1d67dcecbab5a208076 +size 30291 diff --git a/data/2025/2504_05xxx/2504.05741/images/0af46efc8109063459d92e3ac952af1d0db4c1f4374abc2379210c2a8158df0c.jpg b/data/2025/2504_05xxx/2504.05741/images/0af46efc8109063459d92e3ac952af1d0db4c1f4374abc2379210c2a8158df0c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..72aacc6dba2306eee9aa972bb760c602f162d5d8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/0af46efc8109063459d92e3ac952af1d0db4c1f4374abc2379210c2a8158df0c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e858e83dc046e42c7894149c0e6a0119b48e67f8fdde05b54fa6caa03ad18ca3 +size 6269 diff --git a/data/2025/2504_05xxx/2504.05741/images/0b0fcf7f95fe250a8fe6bbf44a1d260920fff8418690e8baac77374ffe3abe42.jpg b/data/2025/2504_05xxx/2504.05741/images/0b0fcf7f95fe250a8fe6bbf44a1d260920fff8418690e8baac77374ffe3abe42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ffbe8ee87b5fa652f2e84198555b776e30dc0d8c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/0b0fcf7f95fe250a8fe6bbf44a1d260920fff8418690e8baac77374ffe3abe42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid
sha256:6337377f83a53618b005a57443c21986d5e1b63f3d89f0feaeb726b0bbe0143c +size 19039 diff --git a/data/2025/2504_05xxx/2504.05741/images/0ebc019b251670c81e10a173838c2b88d5c11676822922ca18c1f72bf0779ad2.jpg b/data/2025/2504_05xxx/2504.05741/images/0ebc019b251670c81e10a173838c2b88d5c11676822922ca18c1f72bf0779ad2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1b8401875a6d904e29de6443a50a24bb5b6a296 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/0ebc019b251670c81e10a173838c2b88d5c11676822922ca18c1f72bf0779ad2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebe60cc803d102c7553f89912ee70589650808ebac4d5c27f2d0cb61d28283d5 +size 3257 diff --git a/data/2025/2504_05xxx/2504.05741/images/1848d6f6c6f09a80d9580fe7a0472334470d7402a8f0f9ebc75f0e54f77cd2c0.jpg b/data/2025/2504_05xxx/2504.05741/images/1848d6f6c6f09a80d9580fe7a0472334470d7402a8f0f9ebc75f0e54f77cd2c0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6e50a5268e2572d365b82b045a3d0a455a7923a0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/1848d6f6c6f09a80d9580fe7a0472334470d7402a8f0f9ebc75f0e54f77cd2c0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5ba5a02d052233f690e71cb0068df62cf76363720422c13c00fe39fdd01c91b +size 5834 diff --git a/data/2025/2504_05xxx/2504.05741/images/1938f29b7db82456a19fe9cf4e7e990304681543c87d5423f264908aa8700184.jpg b/data/2025/2504_05xxx/2504.05741/images/1938f29b7db82456a19fe9cf4e7e990304681543c87d5423f264908aa8700184.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a19524e4df6a47881a999a38a635879d32cefd1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/1938f29b7db82456a19fe9cf4e7e990304681543c87d5423f264908aa8700184.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a135921185315276a8ec8c303409defaa631c6387299aadfe437033fe671e4d9 +size 6476 diff --git a/data/2025/2504_05xxx/2504.05741/images/1ccde59bc1a1cfc30df0759f2c95e650a8bc6504557e78d0263dbb2317d8503f.jpg b/data/2025/2504_05xxx/2504.05741/images/1ccde59bc1a1cfc30df0759f2c95e650a8bc6504557e78d0263dbb2317d8503f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b8f28c4e9bd608128d3752315a4b3bcb4cbdb1cc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/1ccde59bc1a1cfc30df0759f2c95e650a8bc6504557e78d0263dbb2317d8503f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5de3650048db52e846f336a7b59608adb7190aacc4e3d47264ca2b18560ddf77 +size 3870 diff --git a/data/2025/2504_05xxx/2504.05741/images/1d28894d6a6c123e5ad9b28eb9137e605f6610da6589dca3780f4f02c6a2550c.jpg b/data/2025/2504_05xxx/2504.05741/images/1d28894d6a6c123e5ad9b28eb9137e605f6610da6589dca3780f4f02c6a2550c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93502c1b2da365ddf19b31a55c593a21d92856ee --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/1d28894d6a6c123e5ad9b28eb9137e605f6610da6589dca3780f4f02c6a2550c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:109d8f1e671f3d1813004a982ab304f5a4e284f55fa96d4ae47d8bf4385e1df1 +size 12669 diff --git a/data/2025/2504_05xxx/2504.05741/images/2174863fe03b8a6fefacf499b5367771b2602074960c0e4466c227983208a97e.jpg b/data/2025/2504_05xxx/2504.05741/images/2174863fe03b8a6fefacf499b5367771b2602074960c0e4466c227983208a97e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a88549deee41261be17d459d51665d13e6ab7be6 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05741/images/2174863fe03b8a6fefacf499b5367771b2602074960c0e4466c227983208a97e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9260bd11bf2acbd052090c4f929f041a7f19a9d2849ad43ee168586874a0b61 +size 4705 diff --git a/data/2025/2504_05xxx/2504.05741/images/240994ca06ad9c15a377423cda559735d73e4fe5a793b86a2070d7f7a5a19be9.jpg b/data/2025/2504_05xxx/2504.05741/images/240994ca06ad9c15a377423cda559735d73e4fe5a793b86a2070d7f7a5a19be9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c7edb5bef7426a048aa602bb538f86feceba2ff0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/240994ca06ad9c15a377423cda559735d73e4fe5a793b86a2070d7f7a5a19be9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbfef82326553e451ab75bd8fabd3fe0600e620c8f6c640c824f0d02e85e3fbe +size 6194 diff --git a/data/2025/2504_05xxx/2504.05741/images/2757235a519a66a5a620a7b238d4836051bbd2024c54007acd28fc92d3a5a40d.jpg b/data/2025/2504_05xxx/2504.05741/images/2757235a519a66a5a620a7b238d4836051bbd2024c54007acd28fc92d3a5a40d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..705cf2f0f868d9dcec8c6ac5d59c33b3f180ac9d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/2757235a519a66a5a620a7b238d4836051bbd2024c54007acd28fc92d3a5a40d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4551be4e6f96d6c5ce73e4dc3d491e262583343bbb3ad81704e5a0d8afe01c7b +size 26866 diff --git a/data/2025/2504_05xxx/2504.05741/images/2e125489df63d1dba622d543f1d89100bb70834641556935ea63e93e2a42a6ee.jpg b/data/2025/2504_05xxx/2504.05741/images/2e125489df63d1dba622d543f1d89100bb70834641556935ea63e93e2a42a6ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c761bcecb6ed2d9962b09ac544400e4bbefaeb8d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/2e125489df63d1dba622d543f1d89100bb70834641556935ea63e93e2a42a6ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20369484bca3b38c9c1b525bde7dd5dd6f7a8cc08552d88e96c3f372121638b5 +size 5598 diff --git a/data/2025/2504_05xxx/2504.05741/images/3819a0ec430cb3be1877e0e5d680b83af80f03b87287a8aa211ea73638f31fce.jpg b/data/2025/2504_05xxx/2504.05741/images/3819a0ec430cb3be1877e0e5d680b83af80f03b87287a8aa211ea73638f31fce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b54a9f734f253da7518af17dc427e8beb0b6729 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/3819a0ec430cb3be1877e0e5d680b83af80f03b87287a8aa211ea73638f31fce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22577c4bdfff4369bb926063d70bad65c7eed9f0aba3ae8abb0f9e580d9c1149 +size 4971 diff --git a/data/2025/2504_05xxx/2504.05741/images/38214edebabe768045a7736f9e2fe165033a6bea65d8cf66547cf0b5ea3afac5.jpg b/data/2025/2504_05xxx/2504.05741/images/38214edebabe768045a7736f9e2fe165033a6bea65d8cf66547cf0b5ea3afac5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32f30efccf3ac1d75b05485af3dc33efea66931b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/38214edebabe768045a7736f9e2fe165033a6bea65d8cf66547cf0b5ea3afac5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c1f6cfc670ce04ebb84f81e31a99518c10c4bb275575d3e33d02592666c8441 +size 82892 diff --git a/data/2025/2504_05xxx/2504.05741/images/389c597cef146629588878e629b7d0d0b5ca0d6398fabe96cf29e4bf25f18c60.jpg 
b/data/2025/2504_05xxx/2504.05741/images/389c597cef146629588878e629b7d0d0b5ca0d6398fabe96cf29e4bf25f18c60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f32aa08820c16fce3e6b9f40ce4a8460337daf3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/389c597cef146629588878e629b7d0d0b5ca0d6398fabe96cf29e4bf25f18c60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81c8cb3893bc34d00ba86382e21511a7d98593534220c155e1755fe3842dfabe +size 3006 diff --git a/data/2025/2504_05xxx/2504.05741/images/38a84746352f99aa58f5024892b9a887d5561868bc1c864e25f7023b62d30ebf.jpg b/data/2025/2504_05xxx/2504.05741/images/38a84746352f99aa58f5024892b9a887d5561868bc1c864e25f7023b62d30ebf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..365a8227e5e90c459b2557274a78f12ffe0260cc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/38a84746352f99aa58f5024892b9a887d5561868bc1c864e25f7023b62d30ebf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7f0c323267a7154eac7bea1f96d096db8b973c4551c4f5eb999186b4ddc9930 +size 5088 diff --git a/data/2025/2504_05xxx/2504.05741/images/3e79e639e13e47101e33d977d48cadf8bbff9a2854bc79f59e8e311cf8310e95.jpg b/data/2025/2504_05xxx/2504.05741/images/3e79e639e13e47101e33d977d48cadf8bbff9a2854bc79f59e8e311cf8310e95.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae610479f486c6bd32b67b9846893630f9d16cc7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/3e79e639e13e47101e33d977d48cadf8bbff9a2854bc79f59e8e311cf8310e95.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54a9dd45068275d4159cba2ffaa2889e28f2c5d947435be4ce58f68608fed01f +size 5648 diff --git a/data/2025/2504_05xxx/2504.05741/images/3eb3bb847dbdbe1a2a47a22afa09e43d4aa5684a5ada149fd7f1037369596351.jpg b/data/2025/2504_05xxx/2504.05741/images/3eb3bb847dbdbe1a2a47a22afa09e43d4aa5684a5ada149fd7f1037369596351.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b57d225b19290da94baa9dbb6943e4400ed90588 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/3eb3bb847dbdbe1a2a47a22afa09e43d4aa5684a5ada149fd7f1037369596351.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:920f0cd1490d1f90a4442c0860500dd518b843b12f66adc2f62bff47e0b4c78f +size 4666 diff --git a/data/2025/2504_05xxx/2504.05741/images/3fcb27ffe48aea1ccedcd25a827cfac28fb0c50ec36b20a02dd8d0867f62749c.jpg b/data/2025/2504_05xxx/2504.05741/images/3fcb27ffe48aea1ccedcd25a827cfac28fb0c50ec36b20a02dd8d0867f62749c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f17c283fd16633a154fcf83a2208f4f45997784 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/3fcb27ffe48aea1ccedcd25a827cfac28fb0c50ec36b20a02dd8d0867f62749c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0942cedbd9c2648d82dc4b7b3c458c1fc9674b0c29aeedd3e0f4d862d765e88f +size 5387 diff --git a/data/2025/2504_05xxx/2504.05741/images/407f5b5ff8f50c1416dcfbeafdd2fea4269a384cafb24e536a03e1635a638c3b.jpg b/data/2025/2504_05xxx/2504.05741/images/407f5b5ff8f50c1416dcfbeafdd2fea4269a384cafb24e536a03e1635a638c3b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47fca5ab990a92447169cd5d145800021d8fbf8c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/407f5b5ff8f50c1416dcfbeafdd2fea4269a384cafb24e536a03e1635a638c3b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:eabfa1bc7fd5a50a233f1ae4f28defa4c78e525927cc59f84ecdba05500a3ad5 +size 26913 diff --git a/data/2025/2504_05xxx/2504.05741/images/4205adae321fff2cd11e1a6112d2e8212e500eb09240ca6578d94e76fa6abbc7.jpg b/data/2025/2504_05xxx/2504.05741/images/4205adae321fff2cd11e1a6112d2e8212e500eb09240ca6578d94e76fa6abbc7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..621c4b20632dbabc4bf8c4a6348119f5a27c5ff1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/4205adae321fff2cd11e1a6112d2e8212e500eb09240ca6578d94e76fa6abbc7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:932eef6b794c17334b79287ea2f43b714a556690964343ba593f804366170b21 +size 35433 diff --git a/data/2025/2504_05xxx/2504.05741/images/444038e1ba194d39dcc047006d8249c0659c38455fcbbb140ca296cf75f66b58.jpg b/data/2025/2504_05xxx/2504.05741/images/444038e1ba194d39dcc047006d8249c0659c38455fcbbb140ca296cf75f66b58.jpg new file mode 100644 index 0000000000000000000000000000000000000000..97e1031c2438cafcca7991500575baf591d7d764 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/444038e1ba194d39dcc047006d8249c0659c38455fcbbb140ca296cf75f66b58.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e0cc11e46f9ea9ee1a7ec24fc60d2e0fe5895d6b7b45d5d8c0814bfd82add13 +size 25627 diff --git a/data/2025/2504_05xxx/2504.05741/images/48dcf09eaef0520cfce1c2c76b2f54623920e1af72057912b2c72e38de56a625.jpg b/data/2025/2504_05xxx/2504.05741/images/48dcf09eaef0520cfce1c2c76b2f54623920e1af72057912b2c72e38de56a625.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aeeeaae367b6f903485c2678a3be4bef5b825340 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/48dcf09eaef0520cfce1c2c76b2f54623920e1af72057912b2c72e38de56a625.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:262c7d97ceda62feab2626d309b2decc1b6098e1624328654dc7e049c8be38cf +size 15997 diff --git a/data/2025/2504_05xxx/2504.05741/images/4b8104cb4cd6afc445f9075381ba25c807552c05b2bdf48c9ed38b7d6ca8600f.jpg b/data/2025/2504_05xxx/2504.05741/images/4b8104cb4cd6afc445f9075381ba25c807552c05b2bdf48c9ed38b7d6ca8600f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40da2b1af1277dd6c4ee437bae605ae5b737fa92 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/4b8104cb4cd6afc445f9075381ba25c807552c05b2bdf48c9ed38b7d6ca8600f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:893c69ee0d0842708c4492416e86410b2768ef5a9159be854105954675591dfb +size 24414 diff --git a/data/2025/2504_05xxx/2504.05741/images/4e983e2fbbacb92b9637d32f798ca1cd2a3d14d6e611445b59a01bd33e0f2f41.jpg b/data/2025/2504_05xxx/2504.05741/images/4e983e2fbbacb92b9637d32f798ca1cd2a3d14d6e611445b59a01bd33e0f2f41.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d210e83192e4e97711b61263b9f16d648913a35 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/4e983e2fbbacb92b9637d32f798ca1cd2a3d14d6e611445b59a01bd33e0f2f41.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b06cf63e83c522cc77baf323f66b283713397e727b2066dd290c8a2e405547fa +size 33664 diff --git a/data/2025/2504_05xxx/2504.05741/images/52fcbf99861d85d37be6457dffaacd66f6ad84be517791bad824e0570d123414.jpg b/data/2025/2504_05xxx/2504.05741/images/52fcbf99861d85d37be6457dffaacd66f6ad84be517791bad824e0570d123414.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47c033ebcc05ef6db1b4739262a1e5c9e34d66e9 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05741/images/52fcbf99861d85d37be6457dffaacd66f6ad84be517791bad824e0570d123414.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd22ae64a91173c3dbd1d4899c6029dffd4db4846a84453ce984981659838b7d +size 3527 diff --git a/data/2025/2504_05xxx/2504.05741/images/5a24c09eedca9747aacdbdfd328bb09196ff1b1755c47a408ce01368daedce5d.jpg b/data/2025/2504_05xxx/2504.05741/images/5a24c09eedca9747aacdbdfd328bb09196ff1b1755c47a408ce01368daedce5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f07f0423f76284c81661fde994e1c9379149618f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/5a24c09eedca9747aacdbdfd328bb09196ff1b1755c47a408ce01368daedce5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12d2e0abf2187b7debab96b285df4696912ca61eadc0100009fe36571070e72d +size 8595 diff --git a/data/2025/2504_05xxx/2504.05741/images/5b111d9945677c3832d9ac16cd82db57d76f31a097ccf022f7c4a6509822f31f.jpg b/data/2025/2504_05xxx/2504.05741/images/5b111d9945677c3832d9ac16cd82db57d76f31a097ccf022f7c4a6509822f31f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6c4685ebf25386a02f793a6366f766fb140579eb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/5b111d9945677c3832d9ac16cd82db57d76f31a097ccf022f7c4a6509822f31f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d65220d15fbe5f8d875a33e0e4e822fde9ee0c431ad159d45ddc1a6744ada1f3 +size 5385 diff --git a/data/2025/2504_05xxx/2504.05741/images/5c964270f3a4817ca55a037ecad8114adfd78a4b41f8f787a697f5c1bd04ae58.jpg b/data/2025/2504_05xxx/2504.05741/images/5c964270f3a4817ca55a037ecad8114adfd78a4b41f8f787a697f5c1bd04ae58.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30fa1b2e15a2ad05d98bdbef1bfe7454578d9f4b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/5c964270f3a4817ca55a037ecad8114adfd78a4b41f8f787a697f5c1bd04ae58.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44fef050ebdcb417b28fa57217daacdb54f4b94f071528ebe584a11a523c8170 +size 3747 diff --git a/data/2025/2504_05xxx/2504.05741/images/5f19bedd2962b800c650498be0a19331c6b5d047d3286b065b1fe6c3b814debf.jpg b/data/2025/2504_05xxx/2504.05741/images/5f19bedd2962b800c650498be0a19331c6b5d047d3286b065b1fe6c3b814debf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56daf35932a773a6b9fe2d487c98fb28426971a7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/5f19bedd2962b800c650498be0a19331c6b5d047d3286b065b1fe6c3b814debf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1347c5819a0bc07073a00af53f16f9dc28dea05af29be9989bc8791008076a6 +size 12812 diff --git a/data/2025/2504_05xxx/2504.05741/images/5fcda7ffbccffd15d471f3469e4b72ebac13d9d7c36657a87f266d34d09fb509.jpg b/data/2025/2504_05xxx/2504.05741/images/5fcda7ffbccffd15d471f3469e4b72ebac13d9d7c36657a87f266d34d09fb509.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e81fd093aea87d8d7dae7535a5e10c6098f61823 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/5fcda7ffbccffd15d471f3469e4b72ebac13d9d7c36657a87f266d34d09fb509.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebddce48a8b342350577ee074c233a4d07abf45893b7e86254ff0f8034e9bb3a +size 3554 diff --git a/data/2025/2504_05xxx/2504.05741/images/635a316bd00ad0a5ed1896afecb07f016c5e1270da8f778e259185289c975a0c.jpg 
b/data/2025/2504_05xxx/2504.05741/images/635a316bd00ad0a5ed1896afecb07f016c5e1270da8f778e259185289c975a0c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cea13a907e8435abdb716312e45a14273a53790 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/635a316bd00ad0a5ed1896afecb07f016c5e1270da8f778e259185289c975a0c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c33800edb1f6a98bad82dff78bc268ea578237600f121a711e4a57e7adcf6ee +size 4522 diff --git a/data/2025/2504_05xxx/2504.05741/images/6aeb8ad5d07d128fe9a1397a63a04c9474c9d30a1f1f63a493b7426b0b7ca35a.jpg b/data/2025/2504_05xxx/2504.05741/images/6aeb8ad5d07d128fe9a1397a63a04c9474c9d30a1f1f63a493b7426b0b7ca35a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..459e5d22fe15506958f56174b28aca33fd4387a3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/6aeb8ad5d07d128fe9a1397a63a04c9474c9d30a1f1f63a493b7426b0b7ca35a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b138bedfc3bcc6a7158cdab089106b7bd965a5a9f9176f1beb02fd1fec74e8f +size 60669 diff --git a/data/2025/2504_05xxx/2504.05741/images/6ff463028d0d43933dd6eb9fe5122b58039284839b9e9f78028cca72c059078a.jpg b/data/2025/2504_05xxx/2504.05741/images/6ff463028d0d43933dd6eb9fe5122b58039284839b9e9f78028cca72c059078a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..104eb1c713e496feda95ed5a77b7ac7ef053b934 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/6ff463028d0d43933dd6eb9fe5122b58039284839b9e9f78028cca72c059078a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9de11a1a33c3eca4d7c1d5b93fb348c4b95ae3fac5570909cdb1fb96db2ad85 +size 15787 diff --git a/data/2025/2504_05xxx/2504.05741/images/719e4e21f1111305b86c6e48372ff1039979d4964464cd509e89d47859b11164.jpg b/data/2025/2504_05xxx/2504.05741/images/719e4e21f1111305b86c6e48372ff1039979d4964464cd509e89d47859b11164.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89f02c2c656eaf105517fa893860c77ef88f8cfc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/719e4e21f1111305b86c6e48372ff1039979d4964464cd509e89d47859b11164.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e655c884bf105d61cffefc3615c8adb09f428047b4f80148b211abcbea3e5a0 +size 3801 diff --git a/data/2025/2504_05xxx/2504.05741/images/72b79304d4617c22a6598c0b47f0a98af50e4c7399f5c40fbd5541f405d454d1.jpg b/data/2025/2504_05xxx/2504.05741/images/72b79304d4617c22a6598c0b47f0a98af50e4c7399f5c40fbd5541f405d454d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c218f5ab28921b8fee35bbcabd5cad3fe018a1e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/72b79304d4617c22a6598c0b47f0a98af50e4c7399f5c40fbd5541f405d454d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd9f34fb572b8ec538e6bb8f48122f281fb00ab4965c72c53decb3c7aafbbaf5 +size 41444 diff --git a/data/2025/2504_05xxx/2504.05741/images/7e39be504d140de8a0eb60ad117b831c92f3ff21d51dc4f094b22d491f317bb0.jpg b/data/2025/2504_05xxx/2504.05741/images/7e39be504d140de8a0eb60ad117b831c92f3ff21d51dc4f094b22d491f317bb0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8397e374b334411816e7e6ebe879ad2dc27ab77 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/7e39be504d140de8a0eb60ad117b831c92f3ff21d51dc4f094b22d491f317bb0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4686ee16f902f97e0c7aa2ae68962121b5f08bba3a363b802968e2a5573010eb +size 7028 diff --git a/data/2025/2504_05xxx/2504.05741/images/825c285a9b88903c4b94ff2f427c70b668d7df96ef75f9c04693bd0bf53675c7.jpg b/data/2025/2504_05xxx/2504.05741/images/825c285a9b88903c4b94ff2f427c70b668d7df96ef75f9c04693bd0bf53675c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69f4c18e86453356ff114132e9e29c466a810a0d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/825c285a9b88903c4b94ff2f427c70b668d7df96ef75f9c04693bd0bf53675c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:533ad287316d8ed7369a2a9db2a105727a14dd2be41cb7fb0b1715353740a312 +size 3888 diff --git a/data/2025/2504_05xxx/2504.05741/images/844ef683faa78a606fedc9e3b46065adc7dfdff3ac5e76d82e994d3fa16edd1f.jpg b/data/2025/2504_05xxx/2504.05741/images/844ef683faa78a606fedc9e3b46065adc7dfdff3ac5e76d82e994d3fa16edd1f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49466a51ee86f188b9a167f29183fd6414ac52ee --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/844ef683faa78a606fedc9e3b46065adc7dfdff3ac5e76d82e994d3fa16edd1f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b523008b82d2747cd76f68afe2db69ed4af870ecc78626368709158ead5bbcb +size 4132 diff --git a/data/2025/2504_05xxx/2504.05741/images/84b1912394fff3cb83fd0749fd0dec605e2b25ed9a230329921b06dc0dbcf2bc.jpg b/data/2025/2504_05xxx/2504.05741/images/84b1912394fff3cb83fd0749fd0dec605e2b25ed9a230329921b06dc0dbcf2bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..668356a7b1aca9bb30e5c2dc23877b7db8570279 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/84b1912394fff3cb83fd0749fd0dec605e2b25ed9a230329921b06dc0dbcf2bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:376808387577f5d5dcc2904011ef31ea63e8d19ad3f814c68148ca009866a444 +size 6144 diff --git a/data/2025/2504_05xxx/2504.05741/images/874ff5454adc8cf556cf1b558751acd1189bf8a76765ffe3a13291cc381daac0.jpg b/data/2025/2504_05xxx/2504.05741/images/874ff5454adc8cf556cf1b558751acd1189bf8a76765ffe3a13291cc381daac0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d858c4f54e23f8654c14acbf34cf421604436574 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/874ff5454adc8cf556cf1b558751acd1189bf8a76765ffe3a13291cc381daac0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0af24839dafd6e9a7c1583929a13c75dc8995e510a453b5a0e14f0e73efa363 +size 5354 diff --git a/data/2025/2504_05xxx/2504.05741/images/8b034d5198ebc4188d13804b0061ca47f550b932382fdce17a2346d467b3bdfd.jpg b/data/2025/2504_05xxx/2504.05741/images/8b034d5198ebc4188d13804b0061ca47f550b932382fdce17a2346d467b3bdfd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87daa62e506bf64c95d3d0dd122ea9aa1a5bd6ae --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/8b034d5198ebc4188d13804b0061ca47f550b932382fdce17a2346d467b3bdfd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62993fa57006ff72877ae406796a5bd3f324e005d6116df473bf0aa6d5e6806b +size 7212 diff --git a/data/2025/2504_05xxx/2504.05741/images/8dd7b645cf3beb6c96a28f5e0aef4fa66291c343736d43a91f44bcb8296a057a.jpg b/data/2025/2504_05xxx/2504.05741/images/8dd7b645cf3beb6c96a28f5e0aef4fa66291c343736d43a91f44bcb8296a057a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c4f49dbb489e85f2a62ae4b4ffc5d8bca0d6ab8 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05741/images/8dd7b645cf3beb6c96a28f5e0aef4fa66291c343736d43a91f44bcb8296a057a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30827679d2f4791ae3a1a0fa6d77b71ff30b12630659ef4d11d69bbd1976b7c9 +size 31745 diff --git a/data/2025/2504_05xxx/2504.05741/images/8f5b86f4867d6ab8b42c2472d952666b81efbbf0cd189ee083b33702d9a4da62.jpg b/data/2025/2504_05xxx/2504.05741/images/8f5b86f4867d6ab8b42c2472d952666b81efbbf0cd189ee083b33702d9a4da62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9037d80cb7031a3ae69ab358b1ffcebebd02b27 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/8f5b86f4867d6ab8b42c2472d952666b81efbbf0cd189ee083b33702d9a4da62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66776f691590741dfd3142666de520ad990395aaec09f18f4546beb72c68c96c +size 5865 diff --git a/data/2025/2504_05xxx/2504.05741/images/90bb434a635dade7122ba293d4cd70c1b69caa74dee31e00757ff64653720253.jpg b/data/2025/2504_05xxx/2504.05741/images/90bb434a635dade7122ba293d4cd70c1b69caa74dee31e00757ff64653720253.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a9c8e8390f22824f34d7d7154a29a5a7a0ca9cc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/90bb434a635dade7122ba293d4cd70c1b69caa74dee31e00757ff64653720253.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e9e4a32ece7d72e5e3eea1bbf899b02f942fea9771da5933f6187eee665d216 +size 21629 diff --git a/data/2025/2504_05xxx/2504.05741/images/98ee0e4b3bd272e2bf413b0c1ce26416c41201ccc216b837c3587047d88462d8.jpg b/data/2025/2504_05xxx/2504.05741/images/98ee0e4b3bd272e2bf413b0c1ce26416c41201ccc216b837c3587047d88462d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5107ae08cd4c2a4d1b3b2e8f2dff29943ddae216 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/98ee0e4b3bd272e2bf413b0c1ce26416c41201ccc216b837c3587047d88462d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:023589c9b138367a50742adcaed116cf854d42c2e6ff3421782fd9ae1e6f2e31 +size 15494 diff --git a/data/2025/2504_05xxx/2504.05741/images/99a12143a9ba00a6686a3a5f3c2d8ab84698fe37bec78394975ec8ead2901f90.jpg b/data/2025/2504_05xxx/2504.05741/images/99a12143a9ba00a6686a3a5f3c2d8ab84698fe37bec78394975ec8ead2901f90.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0eb93bce3d7609eb9b984e8ee8d9f814b85860c2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/99a12143a9ba00a6686a3a5f3c2d8ab84698fe37bec78394975ec8ead2901f90.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c59c5c586f4bea48ff99111692d176b4244d831eef55ea0f2515fb381bf58f6c +size 7658 diff --git a/data/2025/2504_05xxx/2504.05741/images/9a59fb5c4833508f282db8aecf20b1a86b1c6cb5a7ed19663e45728fc0e7c7f4.jpg b/data/2025/2504_05xxx/2504.05741/images/9a59fb5c4833508f282db8aecf20b1a86b1c6cb5a7ed19663e45728fc0e7c7f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8f21ede6e46e8d98cdc46abb98eb5375818160c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/9a59fb5c4833508f282db8aecf20b1a86b1c6cb5a7ed19663e45728fc0e7c7f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:664f0e3664fa9dcce36d19b234f926946b05646b134c8b9feffa3c9115be5cee +size 6535 diff --git a/data/2025/2504_05xxx/2504.05741/images/a0eded9cc9000111a0d55a02d13bbbd9a2717def24bc78e459b1ae81d5d5b834.jpg 
b/data/2025/2504_05xxx/2504.05741/images/a0eded9cc9000111a0d55a02d13bbbd9a2717def24bc78e459b1ae81d5d5b834.jpg new file mode 100644 index 0000000000000000000000000000000000000000..834ba29b77515cdf36c1f2c2c4664d07e81f2164 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/a0eded9cc9000111a0d55a02d13bbbd9a2717def24bc78e459b1ae81d5d5b834.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09c0b75ccf716e28733a6c0c9a7a52ac8c56cf38cfd3d100bea3ae65af2056ce +size 4838 diff --git a/data/2025/2504_05xxx/2504.05741/images/a9a9a05c460fbfeb08572fc2b2eba0ff359e304716e9d7dfb2b8424d8103dd3e.jpg b/data/2025/2504_05xxx/2504.05741/images/a9a9a05c460fbfeb08572fc2b2eba0ff359e304716e9d7dfb2b8424d8103dd3e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..293e726d878d7847084a0f7441d6537e3b6f312b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/a9a9a05c460fbfeb08572fc2b2eba0ff359e304716e9d7dfb2b8424d8103dd3e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d157321abf9f81541257b6975c9e5b27218da1312d47bae3b2e826ade57ddbb +size 4993 diff --git a/data/2025/2504_05xxx/2504.05741/images/aaea39edf7b8250bfea02aa21ada7ea2b7adefa9180fb407fa2fe28f0a5ac9d0.jpg b/data/2025/2504_05xxx/2504.05741/images/aaea39edf7b8250bfea02aa21ada7ea2b7adefa9180fb407fa2fe28f0a5ac9d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de9752f77e778ec2ba766071586cc2d0b84ec6d0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/aaea39edf7b8250bfea02aa21ada7ea2b7adefa9180fb407fa2fe28f0a5ac9d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03072bc9fb3fcb90fca5fd67477628a36f2c915075ce492c7608bd3e6f293130 +size 8053 diff --git a/data/2025/2504_05xxx/2504.05741/images/b345813acb199a689f123be1245f8188a9c4a1c20170c569d735c61297c4af60.jpg b/data/2025/2504_05xxx/2504.05741/images/b345813acb199a689f123be1245f8188a9c4a1c20170c569d735c61297c4af60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9421a66df9811fe7e0fb79dec3da36243ba5adf9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/b345813acb199a689f123be1245f8188a9c4a1c20170c569d735c61297c4af60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40654c7bec08f6b922465770521c0a9d6184c3b9956f62bcb013e2d246b74e92 +size 6876 diff --git a/data/2025/2504_05xxx/2504.05741/images/b6145f4c8999c8423105577211463cbb37dd58825c7eacd8c3ec4723337ef0d0.jpg b/data/2025/2504_05xxx/2504.05741/images/b6145f4c8999c8423105577211463cbb37dd58825c7eacd8c3ec4723337ef0d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..569f338c6220b4c3f22ce58b0831ea5ddae23ecd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/b6145f4c8999c8423105577211463cbb37dd58825c7eacd8c3ec4723337ef0d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:792d07a92ee7c38943ff481dd377b77d9103f7f9fa2f062eac5b2ed1ef6a530e +size 4207 diff --git a/data/2025/2504_05xxx/2504.05741/images/ba57dee1c812d45caeb0fdacc9dbbc045f6fbdb63a51e0e248e9d6101fefe26e.jpg b/data/2025/2504_05xxx/2504.05741/images/ba57dee1c812d45caeb0fdacc9dbbc045f6fbdb63a51e0e248e9d6101fefe26e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84be36e2e8f30e416375a63fa6f04ff6becd1983 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/ba57dee1c812d45caeb0fdacc9dbbc045f6fbdb63a51e0e248e9d6101fefe26e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b2c782dfd5b9e83c69616783ea07fed500e287dc5e4d75acf029c9770e941be7 +size 14876 diff --git a/data/2025/2504_05xxx/2504.05741/images/bee9931fee3a7da5bea1ea0c7e74e9aecc400b894acf32b949526bc5462ef3ee.jpg b/data/2025/2504_05xxx/2504.05741/images/bee9931fee3a7da5bea1ea0c7e74e9aecc400b894acf32b949526bc5462ef3ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..729e471152110a3c0dc992f949fc9abde3be733b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/bee9931fee3a7da5bea1ea0c7e74e9aecc400b894acf32b949526bc5462ef3ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2fe5f1d0c1d67400d3e961e120730e32d2d75806d0bf84283dbb0a1df7c35c8 +size 6731 diff --git a/data/2025/2504_05xxx/2504.05741/images/bf81283f9948fab0936f38300930de61eaaa0b6497ca1e53d2e49a3d22d619af.jpg b/data/2025/2504_05xxx/2504.05741/images/bf81283f9948fab0936f38300930de61eaaa0b6497ca1e53d2e49a3d22d619af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f59f70651dbd5b7e548ccd42fb8da7db5c6b3db4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/bf81283f9948fab0936f38300930de61eaaa0b6497ca1e53d2e49a3d22d619af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae7bdec361c57ab057438c7f340f409e9aed4dec6686f6f259f39ef6ce05505c +size 7006 diff --git a/data/2025/2504_05xxx/2504.05741/images/bfb0f79f9a742e80bcaf710e1e90883e1635dc632020949ed444d444d088c5f7.jpg b/data/2025/2504_05xxx/2504.05741/images/bfb0f79f9a742e80bcaf710e1e90883e1635dc632020949ed444d444d088c5f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b54f2c8e61a49130fa0c192f77796f0d376c46bd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/bfb0f79f9a742e80bcaf710e1e90883e1635dc632020949ed444d444d088c5f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4656f285cf641ba116d633a5fa43384b44a1c0c872ca72d8bc747c50256240bc +size 7070 diff --git a/data/2025/2504_05xxx/2504.05741/images/c3875c50b27ad48d26b25e205cc0b4387ac9b59eb0f16fcf47d31401462f4ec8.jpg b/data/2025/2504_05xxx/2504.05741/images/c3875c50b27ad48d26b25e205cc0b4387ac9b59eb0f16fcf47d31401462f4ec8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06961ba09a550b1e5e25e01b6e8886a9a1ce88b5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/c3875c50b27ad48d26b25e205cc0b4387ac9b59eb0f16fcf47d31401462f4ec8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa6177031496aebacef9566c52c3a0a54d56f40399e6a7504d0684d7c225ba6c +size 7626 diff --git a/data/2025/2504_05xxx/2504.05741/images/c75b652e54027176b3f475b3533b97ed68ff31d9667b067ef60336a03c67441e.jpg b/data/2025/2504_05xxx/2504.05741/images/c75b652e54027176b3f475b3533b97ed68ff31d9667b067ef60336a03c67441e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3acffe1c7c5aa98ba6677ff069497b5f04cc7454 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/c75b652e54027176b3f475b3533b97ed68ff31d9667b067ef60336a03c67441e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6724a6f8db54e26b8a4f8882a0aace369fb4c14f2c9cc0f393948aa24c0b8a37 +size 4520 diff --git a/data/2025/2504_05xxx/2504.05741/images/c899eae3b28c35102cbbf666a0d15439efad97c88a461ea4e357d1a4523bec7a.jpg b/data/2025/2504_05xxx/2504.05741/images/c899eae3b28c35102cbbf666a0d15439efad97c88a461ea4e357d1a4523bec7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..355cb9447a133721e10993e976e965cbbb8f21b0 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05741/images/c899eae3b28c35102cbbf666a0d15439efad97c88a461ea4e357d1a4523bec7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a85023c2275434e1a14ef8d7f445e700889fa438a5e9414bb87059f75ef3eb8 +size 56653 diff --git a/data/2025/2504_05xxx/2504.05741/images/d17765a1e5feab4d07f1bd3f7602575d7a1ed95b04842153a41df62ac97583ba.jpg b/data/2025/2504_05xxx/2504.05741/images/d17765a1e5feab4d07f1bd3f7602575d7a1ed95b04842153a41df62ac97583ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..619c27373b0b73c1df1c42641e71afa4a0689890 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/d17765a1e5feab4d07f1bd3f7602575d7a1ed95b04842153a41df62ac97583ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53310a0bd7631b73d88ebbba7316a87b3251e2cb9e7432ba7528f161f8ee6bd2 +size 4901 diff --git a/data/2025/2504_05xxx/2504.05741/images/d24970774cf36353f1a5ade86c1c0aaaad9182aff0186e1a5c8b4c07a041452a.jpg b/data/2025/2504_05xxx/2504.05741/images/d24970774cf36353f1a5ade86c1c0aaaad9182aff0186e1a5c8b4c07a041452a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba3c2ef6213a7a9a5fd36f1e4da90904a1fc3cdc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/d24970774cf36353f1a5ade86c1c0aaaad9182aff0186e1a5c8b4c07a041452a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed6f0fa4d36f4f2852506cfc390a3535b24ca54ca52796db33547564bbd707a4 +size 2441 diff --git a/data/2025/2504_05xxx/2504.05741/images/d525d165270ddca02ea8d8ee5b095dd81e4b95d3eea8b81199d47438d2761268.jpg b/data/2025/2504_05xxx/2504.05741/images/d525d165270ddca02ea8d8ee5b095dd81e4b95d3eea8b81199d47438d2761268.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c1316d157178ff3c41c23ea3d90c901fce74b9c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/d525d165270ddca02ea8d8ee5b095dd81e4b95d3eea8b81199d47438d2761268.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4506f372bbacbe96f439b4c095e016c52bf8fbbce29f951b5f9747305f54dc7 +size 7749 diff --git a/data/2025/2504_05xxx/2504.05741/images/d644e55597e5568b650ac9c7f700087e5b4a931d95fdf3b462fa03c854c68e3b.jpg b/data/2025/2504_05xxx/2504.05741/images/d644e55597e5568b650ac9c7f700087e5b4a931d95fdf3b462fa03c854c68e3b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5ad90efdcf476cddb19c67a9f4d89d892a95b6c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/d644e55597e5568b650ac9c7f700087e5b4a931d95fdf3b462fa03c854c68e3b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52325c7b137cfe538a6ca3c79e9186fc8529bb77487a084d6b418ef9e1869d8c +size 19750 diff --git a/data/2025/2504_05xxx/2504.05741/images/d832ac8214ead6291814ec65b36289871ce8a3fe75d47d1a9b200cf3022d9f90.jpg b/data/2025/2504_05xxx/2504.05741/images/d832ac8214ead6291814ec65b36289871ce8a3fe75d47d1a9b200cf3022d9f90.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acdc9f59ea07a0bb0967ada73735cad52d43b038 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/d832ac8214ead6291814ec65b36289871ce8a3fe75d47d1a9b200cf3022d9f90.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d3833983975abd4f8e9030b8e0be461f4fdc7108d0f0d08a2c6166c874f715e +size 27209 diff --git a/data/2025/2504_05xxx/2504.05741/images/dca77a27d116c6cd23d2f0918415991d238bd764976495334f8c2f228ce3bb09.jpg 
b/data/2025/2504_05xxx/2504.05741/images/dca77a27d116c6cd23d2f0918415991d238bd764976495334f8c2f228ce3bb09.jpg new file mode 100644 index 0000000000000000000000000000000000000000..473ac68f6aafe3388f0eed1d3fbc37d5a768a932 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/dca77a27d116c6cd23d2f0918415991d238bd764976495334f8c2f228ce3bb09.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7da923d57d5db6508794395f5d388ff3401f9f46b3f5c22184d41575b67605c +size 6321 diff --git a/data/2025/2504_05xxx/2504.05741/images/e347da8023d83daaeb7d2679ed18e187530d2c4a2361449fe8ee2f86e33f8b17.jpg b/data/2025/2504_05xxx/2504.05741/images/e347da8023d83daaeb7d2679ed18e187530d2c4a2361449fe8ee2f86e33f8b17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff6328aecbbd6f476e0a73ae9632d91742719c0e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/e347da8023d83daaeb7d2679ed18e187530d2c4a2361449fe8ee2f86e33f8b17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1abcc09eb2849a688488729c30d9daf763efb42d7023c10a0de3aca7371edc1c +size 6678 diff --git a/data/2025/2504_05xxx/2504.05741/images/ea170857e813c06b559baee57141d84ae7bea959d35cae659af69d404d9a32e5.jpg b/data/2025/2504_05xxx/2504.05741/images/ea170857e813c06b559baee57141d84ae7bea959d35cae659af69d404d9a32e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8a2e8e6773a1af28a51dde2df42a2e75a6a973e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/ea170857e813c06b559baee57141d84ae7bea959d35cae659af69d404d9a32e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b818c63a954ecba7f8c6de6b0d895aff1b4974dcd260d49bf92a01365d5cb487 +size 8475 diff --git a/data/2025/2504_05xxx/2504.05741/images/ead4974e709c35db97c6cbfb7ffa7b61e5d9ea3ee2de853402c30d478dacb855.jpg b/data/2025/2504_05xxx/2504.05741/images/ead4974e709c35db97c6cbfb7ffa7b61e5d9ea3ee2de853402c30d478dacb855.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0effd248d9ad53d0c00d1c37041976ea2f1c323b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/ead4974e709c35db97c6cbfb7ffa7b61e5d9ea3ee2de853402c30d478dacb855.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff755cfe0a5f4aa243d82f2f949e463084826c11ce167bba77f148a97bc45b70 +size 26048 diff --git a/data/2025/2504_05xxx/2504.05741/images/edbf3374676256bbec9e003ba5f71437d9cca7343568d0db40ce70370c6cbe35.jpg b/data/2025/2504_05xxx/2504.05741/images/edbf3374676256bbec9e003ba5f71437d9cca7343568d0db40ce70370c6cbe35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bbd2e6cfc100c3592e0effe714b13f0ca8583adc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/edbf3374676256bbec9e003ba5f71437d9cca7343568d0db40ce70370c6cbe35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80917c42f2d5c07ca7ab41af5bd57acf5482ccbca62ac4c56f8612977c92c7d9 +size 7915 diff --git a/data/2025/2504_05xxx/2504.05741/images/ee4c2612ac81285275068e95259e898b96d23d847186f0bc23ef8c53d733f70c.jpg b/data/2025/2504_05xxx/2504.05741/images/ee4c2612ac81285275068e95259e898b96d23d847186f0bc23ef8c53d733f70c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b464325aa7c2737d724fcfca7edcd8219149a4e1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/ee4c2612ac81285275068e95259e898b96d23d847186f0bc23ef8c53d733f70c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7b39fe4aae31c35fa4f486c9e91d726bd2a0766a98c75f9ceef6ce6fb38f7355 +size 51326 diff --git a/data/2025/2504_05xxx/2504.05741/images/ef506982b570c9f78fd58fd3bebb850fa6cacd339dab1077cef17f8ffdc36eff.jpg b/data/2025/2504_05xxx/2504.05741/images/ef506982b570c9f78fd58fd3bebb850fa6cacd339dab1077cef17f8ffdc36eff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99671bb571d444dab3eaf60bbd39779a3bb0476b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/ef506982b570c9f78fd58fd3bebb850fa6cacd339dab1077cef17f8ffdc36eff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaacea8478c4bdf8a810973d5a47db461472f77018a92fdd7704cc61675b94c5 +size 6421 diff --git a/data/2025/2504_05xxx/2504.05741/images/ef88d14de92e600bddbd04ca0deb18eae6e257915926630273d672bfa394ac82.jpg b/data/2025/2504_05xxx/2504.05741/images/ef88d14de92e600bddbd04ca0deb18eae6e257915926630273d672bfa394ac82.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8772989ab6632dba07a271c51803890ae542226a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/ef88d14de92e600bddbd04ca0deb18eae6e257915926630273d672bfa394ac82.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4eb0e3acb6dfa63eb5a6b25fe0018c4df88765113c5e61bb68d4e92101bfd21d +size 12666 diff --git a/data/2025/2504_05xxx/2504.05741/images/efd309c3c3c45324b330f07bb4f59d04611226fa5e53057263dcb2b5a8828738.jpg b/data/2025/2504_05xxx/2504.05741/images/efd309c3c3c45324b330f07bb4f59d04611226fa5e53057263dcb2b5a8828738.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c3d9739d15d083af257b16b34941e22d6f232d0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/efd309c3c3c45324b330f07bb4f59d04611226fa5e53057263dcb2b5a8828738.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad50f9ee6a4d0c87b76923269c0075c26204482cb993ad1daf4b26f898b5b599 +size 3710 diff --git a/data/2025/2504_05xxx/2504.05741/images/f0e7fe44ac48890dcc46337bba04d7e43598e78e34f85a499709901d7ce871dc.jpg b/data/2025/2504_05xxx/2504.05741/images/f0e7fe44ac48890dcc46337bba04d7e43598e78e34f85a499709901d7ce871dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6e7cc23e0b771f33f147ea786a944289b37efd9b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/f0e7fe44ac48890dcc46337bba04d7e43598e78e34f85a499709901d7ce871dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0541d2cb26c9153bcebcfc3e1b9b3b2b0b273e7a9724401ccd7994c2d53c428 +size 4125 diff --git a/data/2025/2504_05xxx/2504.05741/images/f15e35020f438296ec0f20c07856070badcae024af68719216a172947aa89de8.jpg b/data/2025/2504_05xxx/2504.05741/images/f15e35020f438296ec0f20c07856070badcae024af68719216a172947aa89de8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb8ec40664944f51c8c7ddc04eaf5e7912a03651 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/f15e35020f438296ec0f20c07856070badcae024af68719216a172947aa89de8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:065e9ea0a97145dfc2f87510b2e0143ac0a50adb372c023bead6f0a3d713fc0c +size 14470 diff --git a/data/2025/2504_05xxx/2504.05741/images/f252b04531ac99c22135065ab64a30e768c4a30b5909c3b088d0a7287ae98fa6.jpg b/data/2025/2504_05xxx/2504.05741/images/f252b04531ac99c22135065ab64a30e768c4a30b5909c3b088d0a7287ae98fa6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bbec6b2b25a9d542b97aa7e82a7ecfa7048690f7 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05741/images/f252b04531ac99c22135065ab64a30e768c4a30b5909c3b088d0a7287ae98fa6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a781cd7e7ef7bb695866184b6b6be9bf8ce9aea240c898f5d27040ff7e225ba +size 150777 diff --git a/data/2025/2504_05xxx/2504.05741/images/f2c87fb5432980e214a9d9975e78137293848179ab75d640919142227f3d85e8.jpg b/data/2025/2504_05xxx/2504.05741/images/f2c87fb5432980e214a9d9975e78137293848179ab75d640919142227f3d85e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..422b96a7e5a64ee053d34ab864cf03b30b78c99e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/f2c87fb5432980e214a9d9975e78137293848179ab75d640919142227f3d85e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ba92d8fde305cf7c84411a2e443f92eedeb6abb55c51a5897097bf975d238da +size 3115 diff --git a/data/2025/2504_05xxx/2504.05741/images/f4ea69d751e0733816159b64e5f6726b899de25b722611fce391bc9874c2deee.jpg b/data/2025/2504_05xxx/2504.05741/images/f4ea69d751e0733816159b64e5f6726b899de25b722611fce391bc9874c2deee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..319cecf72a4e6bbce65a855963ec900c526e1f8a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/f4ea69d751e0733816159b64e5f6726b899de25b722611fce391bc9874c2deee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a04af00bbe71bc97e085212417f0bc29cfe07dcc0eb53763e0d8d72af358958c +size 4528 diff --git a/data/2025/2504_05xxx/2504.05741/images/f5b1120ec986332b1b040332bca193bfbb048be5c316f804ed609cd977122e83.jpg b/data/2025/2504_05xxx/2504.05741/images/f5b1120ec986332b1b040332bca193bfbb048be5c316f804ed609cd977122e83.jpg new file mode 100644 index 0000000000000000000000000000000000000000..342db74645b4a5ce8baf7b0cdf589c7a5c423a63 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/f5b1120ec986332b1b040332bca193bfbb048be5c316f804ed609cd977122e83.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de55be52e930595e67255410909f967a5dc8c449677ae61654988c1cc498f19f +size 6556 diff --git a/data/2025/2504_05xxx/2504.05741/images/fa74893b1abc667b3abe7101c9ff507e88e30cd4c438b34f1eb8f39e45365805.jpg b/data/2025/2504_05xxx/2504.05741/images/fa74893b1abc667b3abe7101c9ff507e88e30cd4c438b34f1eb8f39e45365805.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce84a937c2dbbad56c756fbcdbe2089768369e5d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/fa74893b1abc667b3abe7101c9ff507e88e30cd4c438b34f1eb8f39e45365805.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78685987d0e7a2b70c2d6702f4a5b47809dcfe579a6181adfae48b9d12e99174 +size 18082 diff --git a/data/2025/2504_05xxx/2504.05741/images/fc9e0189f4b580f93b7b09e6dc65626e313ee2035d702444275ea38c70f2bfb6.jpg b/data/2025/2504_05xxx/2504.05741/images/fc9e0189f4b580f93b7b09e6dc65626e313ee2035d702444275ea38c70f2bfb6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c558477dab55209a1e21a8cadf76cc539c35660f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/fc9e0189f4b580f93b7b09e6dc65626e313ee2035d702444275ea38c70f2bfb6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82ae38a385509fb34f46a264b28262f4920dfd8279896a38ab2cdc46973b2867 +size 25720 diff --git a/data/2025/2504_05xxx/2504.05741/images/fdeeb07fee366407b4c1ec2e6a90f9c92696eae5376595daabb142150d0d25fb.jpg 
b/data/2025/2504_05xxx/2504.05741/images/fdeeb07fee366407b4c1ec2e6a90f9c92696eae5376595daabb142150d0d25fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7dfb2261cccc4d09f9323a84480fe8fc0b3d46c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/images/fdeeb07fee366407b4c1ec2e6a90f9c92696eae5376595daabb142150d0d25fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77097ea8403a5e14ac29078c0f9e29a1b72d2aaaf40fcc97de3bd79b5ab5bb69 +size 4227 diff --git a/data/2025/2504_05xxx/2504.05741/layout.json b/data/2025/2504_05xxx/2504.05741/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a4fae623a01f81a9444df6e42db2a4e4cb27ca57 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05741/layout.json @@ -0,0 +1,13209 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 180, + 103, + 430, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 103, + 430, + 121 + ], + "spans": [ + { + "bbox": [ + 180, + 103, + 430, + 121 + ], + "type": "text", + "content": "DDT: Decoupled Diffusion Transformer" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 134, + 141, + 476, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 141, + 476, + 193 + ], + "spans": [ + { + "bbox": [ + 134, + 141, + 476, + 193 + ], + "type": "text", + "content": "Shuai Wang1 Zhi Tian2 Weilin Huang2 Limin Wang1, * \n1Nanjing University 2ByteDance Seed Vision \nhttps://github.com/MCG-NJU/DDT" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 71, + 211, + 216, + 366 + ], + "blocks": [ + { + "bbox": [ + 71, + 211, + 216, + 366 + ], + "lines": [ + { + "bbox": [ + 71, + 211, + 216, + 366 + ], + "spans": [ + { + "bbox": [ + 71, + 211, + 216, + 366 + ], + "type": "image", + "image_path": "0b0fcf7f95fe250a8fe6bbf44a1d260920fff8418690e8baac77374ffe3abe42.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 62, + 371, + 197, + 380 + ], + "lines": [ + { + "bbox": [ + 62, + 371, + 197, + 380 + ], + "spans": [ + { + "bbox": [ + 62, + 371, + 197, + 380 + ], + "type": "text", + "content": "(a) Our Decoupled Diffusion Transformer" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 220, + 212, + 347, + 369 + ], + "blocks": [ + { + "bbox": [ + 220, + 212, + 347, + 369 + ], + "lines": [ + { + "bbox": [ + 220, + 212, + 347, + 369 + ], + "spans": [ + { + "bbox": [ + 220, + 212, + 347, + 369 + ], + "type": "image", + "image_path": "48dcf09eaef0520cfce1c2c76b2f54623920e1af72057912b2c72e38de56a625.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 371, + 354, + 380 + ], + "lines": [ + { + "bbox": [ + 225, + 371, + 354, + 380 + ], + "spans": [ + { + "bbox": [ + 225, + 371, + 354, + 380 + ], + "type": "text", + "content": "(b) Conventional Diffusion Transformer" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 359, + 213, + 547, + 357 + ], + "blocks": [ + { + "bbox": [ + 359, + 213, + 547, + 357 + ], + "lines": [ + { + "bbox": [ + 359, + 213, + 547, + 357 + ], + "spans": [ + { + "bbox": [ + 359, + 213, + 547, + 357 + ], + "type": "image", + "image_path": "fc9e0189f4b580f93b7b09e6dc65626e313ee2035d702444275ea38c70f2bfb6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 373, + 371, + 525, + 380 + ], 
+ "lines": [ + { + "bbox": [ + 373, + 371, + 525, + 380 + ], + "spans": [ + { + "bbox": [ + 373, + 371, + 525, + 380 + ], + "type": "text", + "content": "(c) FID compared with Other Diffusion Models" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 384, + 555, + 407 + ], + "lines": [ + { + "bbox": [ + 55, + 384, + 555, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 384, + 555, + 407 + ], + "type": "text", + "content": "Figure 1. Our decoupled diffusion transformer (DDT-XL/2) achieves a SoTA 1.31 FID under 256 epochs. Our decoupled diffusion transformer models incorporate a condition encoder to extract semantic self-conditions and a velocity decoder to decode velocity." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 152, + 416, + 200, + 429 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 416, + 200, + 429 + ], + "spans": [ + { + "bbox": [ + 152, + 416, + 200, + 429 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 450, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 450, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 53, + 450, + 296, + 715 + ], + "type": "text", + "content": "Diffusion transformers have demonstrated remarkable generation quality, albeit requiring longer training iterations and numerous inference steps. In each denoising step, diffusion transformers encode the noisy inputs to extract the lower-frequency semantic component and then decode the higher frequency with identical modules. This scheme creates an inherent optimization dilemma: encoding low-frequency semantics necessitates reducing high-frequency components, creating tension between semantic encoding and high-frequency decoding. To resolve this challenge, we propose a new Decoupled Diffusion Transformer (DDT), with a decoupled design of a dedicated condition encoder for semantic extraction alongside a specialized velocity decoder. Our experiments reveal that a more substantial encoder yields performance improvements as model size increases. For ImageNet " + }, + { + "bbox": [ + 53, + 450, + 296, + 715 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 53, + 450, + 296, + 715 + ], + "type": "text", + "content": ", Our DDT-XL/2 achieves a new state-of-the-art performance of 1.31 FID (nearly " + }, + { + "bbox": [ + 53, + 450, + 296, + 715 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 53, + 450, + 296, + 715 + ], + "type": "text", + "content": " faster training convergence compared to previous diffusion transformers). For ImageNet " + }, + { + "bbox": [ + 53, + 450, + 296, + 715 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 53, + 450, + 296, + 715 + ], + "type": "text", + "content": ", Our DDT-XL/2 achieves a new state-of-the-art FID of 1.28. Additionally, as a beneficial by-product, our decoupled architecture enhances inference speed by enabling the sharing self" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 418, + 555, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 418, + 555, + 466 + ], + "spans": [ + { + "bbox": [ + 313, + 418, + 555, + 466 + ], + "type": "text", + "content": "condition between adjacent denoising steps. 
To minimize performance degradation, we propose a novel statistical dynamic programming approach to identify optimal sharing strategies." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 503, + 394, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 503, + 394, + 516 + ], + "spans": [ + { + "bbox": [ + 314, + 503, + 394, + 516 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 312, + 524, + 555, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 524, + 555, + 620 + ], + "spans": [ + { + "bbox": [ + 312, + 524, + 555, + 620 + ], + "type": "text", + "content": "Image generation is a fundamental task in computer vision research, which aims at capturing the inherent data distribution of original image datasets and generating high-quality synthetic images through distribution sampling. Diffusion models [19, 21, 29, 30, 41] have recently emerged as highly promising solutions to learn the underlying data distribution in image generation, outperforming the GAN-based models [3, 40] and Auto-Regressive models [5, 43, 51]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 620, + 556, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 620, + 556, + 694 + ], + "spans": [ + { + "bbox": [ + 313, + 620, + 556, + 694 + ], + "type": "text", + "content": "The diffusion forward process gradually adds Gaussian noise to the pristine data following an SDE forward schedule [19, 21, 41]. The denoising process learns the score estimation from this corruption process. Once the score function is accurately learned, data samples can be synthesized by numerically solving the reverse SDE [21, 29, 30, 41]." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "text", + "content": "arXiv:2504.05741v2 [cs.CV] 9 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 328, + 703, + 494, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 703, + 494, + 713 + ], + "spans": [ + { + "bbox": [ + 328, + 703, + 494, + 713 + ], + "type": "text", + "content": ": Corresponding author (lmwang@nju.edu.cn)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "content": "Diffusion Transformers [32, 36] introduce the transformer architecture into diffusion models to replace the traditionally dominant UNet-based model [2, 10]. Empirical evidence suggests that, given sufficient training iterations, diffusion transformers outperform conventional approaches even without relying on long residual connections [36]. Nevertheless, their slow convergence rate still poses a great challenge for developing new models due to the high cost."
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 170, + 294, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 170, + 294, + 314 + ], + "spans": [ + { + "bbox": [ + 55, + 170, + 294, + 314 + ], + "type": "text", + "content": "In this paper, we want to tackle the aforementioned major disadvantages from a model design perspective. Classic computer vision algorithms [4, 17, 23] strategically employ encoder-decoder architectures, prioritizing large encoders for rich feature extraction and lightweight decoders for efficient inference, while contemporary diffusion models predominantly rely on conventional decoder-only structures. We systematically investigate the underexplored potential of decoupled encoder-decoder designs in diffusion transformers, by answering the question: can a decoupled encoder-decoder transformer unlock accelerated convergence and enhanced sample quality?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 315, + 295, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 315, + 295, + 531 + ], + "spans": [ + { + "bbox": [ + 55, + 315, + 295, + 531 + ], + "type": "text", + "content": "Through investigative experiments, we conclude that the plain diffusion transformer faces an optimization dilemma between abstract structure information extraction and detailed appearance information recovery. Further, the diffusion transformer is limited in extracting semantic representations due to the raw pixel supervision [28, 52, 53]. To address this issue, we propose a new architecture that explicitly decouples low-frequency semantic encoding and high-frequency detailed decoding through a customized encoder-decoder design. We call this encoder-decoder diffusion transformer model DDT (Decoupled Diffusion Transformer). DDT incorporates a condition encoder to extract semantic self-condition features. The extracted self-condition is fed into a velocity decoder along with the noisy latent to regress the velocity field. To maintain the local consistency of self-condition features across adjacent steps, we employ direct supervision of representation alignment and indirect supervision from the velocity regression loss of the decoder." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 532, + 294, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 532, + 294, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 532, + 294, + 628 + ], + "type": "text", + "content": "In the ImageNet " + }, + { + "bbox": [ + 55, + 532, + 294, + 628 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 532, + 294, + 628 + ], + "type": "text", + "content": " dataset, using the traditional off-the-shelf VAE [38], our decoupled diffusion transformer (DDT-XL/2) model achieves the state-of-the-art performance of 1.31 FID with interval guidance under only 256 epochs, approximately " + }, + { + "bbox": [ + 55, + 532, + 294, + 628 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 55, + 532, + 294, + 628 + ], + "type": "text", + "content": " training acceleration compared to REPA [52]. In the ImageNet " + }, + { + "bbox": [ + 55, + 532, + 294, + 628 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 532, + 294, + 628 + ], + "type": "text", + "content": " dataset, our DDT-XL/2 model achieves 1.28 FID within 500K finetuning steps."
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 630, + 294, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 294, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 294, + 715 + ], + "type": "text", + "content": "Furthermore, our DDT achieves strong local consistency on its self-condition feature from the encoder. This property can significantly boost the inference speed by sharing the self-condition between adjacent steps. We formulate finding the optimal encoder sharing strategy as a classic minimal-sum-path problem by minimizing the performance drop of sharing the self-condition among adjacent steps. We propose" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 131 + ], + "type": "text", + "content": "a statistical dynamic programming approach to find the optimal encoder sharing strategy with a negligible, second-level time cost. Compared with naive uniform sharing, our dynamic programming delivers a minimal FID drop. Our contributions are summarized as follows." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 133, + 553, + 312 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 314, + 133, + 553, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 133, + 553, + 168 + ], + "spans": [ + { + "bbox": [ + 314, + 133, + 553, + 168 + ], + "type": "text", + "content": "- We propose a new decoupled diffusion transformer model, which consists of a condition encoder and a velocity decoder." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 169, + 553, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 169, + 553, + 216 + ], + "spans": [ + { + "bbox": [ + 313, + 169, + 553, + 216 + ], + "type": "text", + "content": "- We propose statistical dynamic programming to find the optimal self-condition sharing strategy, boosting inference speed while keeping performance degradation minimal." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 217, + 553, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 217, + 553, + 276 + ], + "spans": [ + { + "bbox": [ + 313, + 217, + 553, + 276 + ], + "type": "text", + "content": "- In the ImageNet " + }, + { + "bbox": [ + 313, + 217, + 553, + 276 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 217, + 553, + 276 + ], + "type": "text", + "content": " dataset, using the traditional SDf8d4 VAE, our decoupled diffusion transformer (DDT-XL/2) model achieves the SoTA 1.31 FID with interval guidance under only 256 epochs, approximately " + }, + { + "bbox": [ + 313, + 217, + 553, + 276 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 313, + 217, + 553, + 276 + ], + "type": "text", + "content": " training acceleration compared to REPA [52]."
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 276, + 553, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 276, + 553, + 312 + ], + "spans": [ + { + "bbox": [ + 313, + 276, + 553, + 312 + ], + "type": "text", + "content": "- In the ImageNet " + }, + { + "bbox": [ + 313, + 276, + 553, + 312 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 313, + 276, + 553, + 312 + ], + "type": "text", + "content": " dataset, our DDT-XL/2 model achieves the SoTA 1.28 FID, outperforming all previous methods by a significant margin." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 326, + 400, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 326, + 400, + 338 + ], + "spans": [ + { + "bbox": [ + 314, + 326, + 400, + 338 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 347, + 555, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 347, + 555, + 563 + ], + "spans": [ + { + "bbox": [ + 313, + 347, + 555, + 563 + ], + "type": "text", + "content": "Diffusion Transformers. The pioneering work of DiT [36] introduced transformers into diffusion models to replace the traditionally dominant UNet architecture [2, 10]. Empirical evidence demonstrates that given sufficient training iterations, diffusion transformers outperform conventional approaches even without relying on long residual connections. SiT [32] further validated the transformer architecture with linear flow diffusion. Following the simplicity and scalability of the diffusion transformer [32, 36], SD3 [12], Lumina [54], and PixArt [6, 7] introduced the diffusion transformer to more advanced text-to-image areas. Moreover, diffusion transformers have recently come to dominate the text-to-video area with substantial visual and motion quality [1, 20, 24]. Our decoupled diffusion transformer (DDT) presents a new variant within the diffusion transformer family. It achieves faster convergence by decoupling the low-frequency encoding and the high-frequency decoding." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 582, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 556, + 715 + ], + "type": "text", + "content": "Fast Diffusion Training. To accelerate the training efficiency of diffusion transformers, recent advances have pursued multi-faceted optimizations. Operator-centric approaches [13, 45, 48, 49] leverage efficient attention mechanisms: linear-attention variants [13, 45, 49] reduced quadratic complexity to speed up training, while sparse-attention architectures [48] prioritized sparsely relevant token interactions. Resampling approaches [12, 16] proposed lognorm sampling [12] or loss reweighting [16] techniques to stabilize training dynamics. 
Representation learning enhancement approaches integrate external inductive biases:" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 72, + 181, + 194 + ], + "blocks": [ + { + "bbox": [ + 59, + 72, + 181, + 194 + ], + "lines": [ + { + "bbox": [ + 59, + 72, + 181, + 194 + ], + "spans": [ + { + "bbox": [ + 59, + 72, + 181, + 194 + ], + "type": "image", + "image_path": "90bb434a635dade7122ba293d4cd70c1b69caa74dee31e00757ff64653720253.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 181, + 72, + 304, + 194 + ], + "blocks": [ + { + "bbox": [ + 181, + 72, + 304, + 194 + ], + "lines": [ + { + "bbox": [ + 181, + 72, + 304, + 194 + ], + "spans": [ + { + "bbox": [ + 181, + 72, + 304, + 194 + ], + "type": "image", + "image_path": "4205adae321fff2cd11e1a6112d2e8212e500eb09240ca6578d94e76fa6abbc7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 304, + 72, + 427, + 194 + ], + "blocks": [ + { + "bbox": [ + 304, + 72, + 427, + 194 + ], + "lines": [ + { + "bbox": [ + 304, + 72, + 427, + 194 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 427, + 194 + ], + "type": "image", + "image_path": "8dd7b645cf3beb6c96a28f5e0aef4fa66291c343736d43a91f44bcb8296a057a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 428, + 72, + 552, + 194 + ], + "blocks": [ + { + "bbox": [ + 428, + 72, + 552, + 194 + ], + "lines": [ + { + "bbox": [ + 428, + 72, + 552, + 194 + ], + "spans": [ + { + "bbox": [ + 428, + 72, + 552, + 194 + ], + "type": "image", + "image_path": "98ee0e4b3bd272e2bf413b0c1ce26416c41201ccc216b837c3587047d88462d8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 58, + 195, + 120, + 256 + ], + "blocks": [ + { + "bbox": [ + 58, + 195, + 120, + 256 + ], + "lines": [ + { + "bbox": [ + 58, + 195, + 120, + 256 + ], + "spans": [ + { + "bbox": [ + 58, + 195, + 120, + 256 + ], + "type": "image", + "image_path": "5b111d9945677c3832d9ac16cd82db57d76f31a097ccf022f7c4a6509822f31f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 120, + 195, + 181, + 256 + ], + "blocks": [ + { + "bbox": [ + 120, + 195, + 181, + 256 + ], + "lines": [ + { + "bbox": [ + 120, + 195, + 181, + 256 + ], + "spans": [ + { + "bbox": [ + 120, + 195, + 181, + 256 + ], + "type": "image", + "image_path": "240994ca06ad9c15a377423cda559735d73e4fe5a793b86a2070d7f7a5a19be9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 181, + 195, + 243, + 256 + ], + "blocks": [ + { + "bbox": [ + 181, + 195, + 243, + 256 + ], + "lines": [ + { + "bbox": [ + 181, + 195, + 243, + 256 + ], + "spans": [ + { + "bbox": [ + 181, + 195, + 243, + 256 + ], + "type": "image", + "image_path": 
"3fcb27ffe48aea1ccedcd25a827cfac28fb0c50ec36b20a02dd8d0867f62749c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 243, + 195, + 304, + 256 + ], + "blocks": [ + { + "bbox": [ + 243, + 195, + 304, + 256 + ], + "lines": [ + { + "bbox": [ + 243, + 195, + 304, + 256 + ], + "spans": [ + { + "bbox": [ + 243, + 195, + 304, + 256 + ], + "type": "image", + "image_path": "1848d6f6c6f09a80d9580fe7a0472334470d7402a8f0f9ebc75f0e54f77cd2c0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 304, + 195, + 366, + 256 + ], + "blocks": [ + { + "bbox": [ + 304, + 195, + 366, + 256 + ], + "lines": [ + { + "bbox": [ + 304, + 195, + 366, + 256 + ], + "spans": [ + { + "bbox": [ + 304, + 195, + 366, + 256 + ], + "type": "image", + "image_path": "d525d165270ddca02ea8d8ee5b095dd81e4b95d3eea8b81199d47438d2761268.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 366, + 195, + 428, + 256 + ], + "blocks": [ + { + "bbox": [ + 366, + 195, + 428, + 256 + ], + "lines": [ + { + "bbox": [ + 366, + 195, + 428, + 256 + ], + "spans": [ + { + "bbox": [ + 366, + 195, + 428, + 256 + ], + "type": "image", + "image_path": "8f5b86f4867d6ab8b42c2472d952666b81efbbf0cd189ee083b33702d9a4da62.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 428, + 195, + 489, + 256 + ], + "blocks": [ + { + "bbox": [ + 428, + 195, + 489, + 256 + ], + "lines": [ + { + "bbox": [ + 428, + 195, + 489, + 256 + ], + "spans": [ + { + "bbox": [ + 428, + 195, + 489, + 256 + ], + "type": "image", + "image_path": "a9a9a05c460fbfeb08572fc2b2eba0ff359e304716e9d7dfb2b8424d8103dd3e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 489, + 195, + 552, + 256 + ], + "blocks": [ + { + "bbox": [ + 489, + 195, + 552, + 256 + ], + "lines": [ + { + "bbox": [ + 489, + 195, + 552, + 256 + ], + "spans": [ + { + "bbox": [ + 489, + 195, + 552, + 256 + ], + "type": "image", + "image_path": "844ef683faa78a606fedc9e3b46065adc7dfdff3ac5e76d82e994d3fa16edd1f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 58, + 257, + 120, + 319 + ], + "blocks": [ + { + "bbox": [ + 58, + 257, + 120, + 319 + ], + "lines": [ + { + "bbox": [ + 58, + 257, + 120, + 319 + ], + "spans": [ + { + "bbox": [ + 58, + 257, + 120, + 319 + ], + "type": "image", + "image_path": "aaea39edf7b8250bfea02aa21ada7ea2b7adefa9180fb407fa2fe28f0a5ac9d0.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "lines": [ + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "spans": [ + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "type": "text", + "content": "Figure 2. Selected " + }, + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "type": "text", + "content": " resolution samples. 
Generated from DDT-XL/2 trained on ImageNet " + }, + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "type": "text", + "content": " resolution and ImageNet " + }, + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 329, + 555, + 352 + ], + "type": "text", + "content": " resolution with CFG = 4.0." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 120, + 257, + 181, + 319 + ], + "blocks": [ + { + "bbox": [ + 120, + 257, + 181, + 319 + ], + "lines": [ + { + "bbox": [ + 120, + 257, + 181, + 319 + ], + "spans": [ + { + "bbox": [ + 120, + 257, + 181, + 319 + ], + "type": "image", + "image_path": "3eb3bb847dbdbe1a2a47a22afa09e43d4aa5684a5ada149fd7f1037369596351.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 181, + 257, + 243, + 319 + ], + "blocks": [ + { + "bbox": [ + 181, + 257, + 243, + 319 + ], + "lines": [ + { + "bbox": [ + 181, + 257, + 243, + 319 + ], + "spans": [ + { + "bbox": [ + 181, + 257, + 243, + 319 + ], + "type": "image", + "image_path": "bfb0f79f9a742e80bcaf710e1e90883e1635dc632020949ed444d444d088c5f7.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 243, + 257, + 304, + 319 + ], + "blocks": [ + { + "bbox": [ + 243, + 257, + 304, + 319 + ], + "lines": [ + { + "bbox": [ + 243, + 257, + 304, + 319 + ], + "spans": [ + { + "bbox": [ + 243, + 257, + 304, + 319 + ], + "type": "image", + "image_path": "f5b1120ec986332b1b040332bca193bfbb048be5c316f804ed609cd977122e83.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 304, + 257, + 366, + 319 + ], + "blocks": [ + { + "bbox": [ + 304, + 257, + 366, + 319 + ], + "lines": [ + { + "bbox": [ + 304, + 257, + 366, + 319 + ], + "spans": [ + { + "bbox": [ + 304, + 257, + 366, + 319 + ], + "type": "image", + "image_path": "1938f29b7db82456a19fe9cf4e7e990304681543c87d5423f264908aa8700184.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 367, + 257, + 428, + 319 + ], + "blocks": [ + { + "bbox": [ + 367, + 257, + 428, + 319 + ], + "lines": [ + { + "bbox": [ + 367, + 257, + 428, + 319 + ], + "spans": [ + { + "bbox": [ + 367, + 257, + 428, + 319 + ], + "type": "image", + "image_path": "99a12143a9ba00a6686a3a5f3c2d8ab84698fe37bec78394975ec8ead2901f90.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 428, + 257, + 489, + 319 + ], + "blocks": [ + { + "bbox": [ + 428, + 257, + 489, + 319 + ], + "lines": [ + { + "bbox": [ + 428, + 257, + 489, + 319 + ], + "spans": [ + { + "bbox": [ + 428, + 257, + 489, + 319 + ], + "type": "image", + "image_path": "0af46efc8109063459d92e3ac952af1d0db4c1f4374abc2379210c2a8158df0c.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 489, + 257, + 552, + 319 + ], + "blocks": [ + { + "bbox": [ + 489, + 257, + 552, + 319 + ], + "lines": [ + { + "bbox": [ + 489, + 257, + 552, + 319 + ], + "spans": [ + { + "bbox": [ + 489, + 257, + 
552, + 319 + ], + "type": "image", + "image_path": "9a59fb5c4833508f282db8aecf20b1a86b1c6cb5a7ed19663e45728fc0e7c7f4.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 57, + 363, + 294, + 481 + ], + "blocks": [ + { + "bbox": [ + 57, + 363, + 294, + 481 + ], + "lines": [ + { + "bbox": [ + 57, + 363, + 294, + 481 + ], + "spans": [ + { + "bbox": [ + 57, + 363, + 294, + 481 + ], + "type": "image", + "image_path": "4e983e2fbbacb92b9637d32f798ca1cd2a3d14d6e611445b59a01bd33e0f2f41.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 491, + 295, + 535 + ], + "lines": [ + { + "bbox": [ + 55, + 491, + 295, + 535 + ], + "spans": [ + { + "bbox": [ + 55, + 491, + 295, + 535 + ], + "type": "text", + "content": "Figure 3. The reverse-SDE process (generation) of SiT-XL/2 in " + }, + { + "bbox": [ + 55, + 491, + 295, + 535 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 55, + 491, + 295, + 535 + ], + "type": "text", + "content": " space. There is a clear generation process from low frequency to high frequency. Most of the time is spent on generating high-frequency details (from " + }, + { + "bbox": [ + 55, + 491, + 295, + 535 + ], + "type": "inline_equation", + "content": "t = 0.4" + }, + { + "bbox": [ + 55, + 491, + 295, + 535 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 55, + 491, + 295, + 535 + ], + "type": "inline_equation", + "content": "t = 1.0" + }, + { + "bbox": [ + 55, + 491, + 295, + 535 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "bbox": [ + 55, + 548, + 295, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 548, + 295, + 620 + ], + "spans": [ + { + "bbox": [ + 55, + 548, + 295, + 620 + ], + "type": "text", + "content": "REPA [52], RCG [27] and DoD [53] borrowed vision-specific priors into diffusion training, while masked modeling techniques [14, 15] strengthened spatial reasoning by enforcing structured feature completion during denoising. Collectively, these strategies address computational, sampling, and representational bottlenecks." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 55, + 632, + 178, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 632, + 178, + 647 + ], + "spans": [ + { + "bbox": [ + 55, + 632, + 178, + 647 + ], + "type": "text", + "content": "3. Preliminary Analysis" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "content": "Linear-based flow matching [29, 30, 32] represents a specialized family of diffusion models that we focus on as our primary analytical subject due to its simplicity and efficiency. 
For the convenience of discussion, in certain situations, diffusion and flow-matching will be used interchange-" + } + ] + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 317, + 360, + 436, + 451 + ], + "blocks": [ + { + "bbox": [ + 317, + 360, + 436, + 451 + ], + "lines": [ + { + "bbox": [ + 317, + 360, + 436, + 451 + ], + "spans": [ + { + "bbox": [ + 317, + 360, + 436, + 451 + ], + "type": "image", + "image_path": "6ff463028d0d43933dd6eb9fe5122b58039284839b9e9f78028cca72c059078a.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 459, + 555, + 503 + ], + "lines": [ + { + "bbox": [ + 313, + 459, + 555, + 503 + ], + "spans": [ + { + "bbox": [ + 313, + 459, + 555, + 503 + ], + "type": "text", + "content": "Figure 4. The FID50K metric of SiT-XL/2 for different timeshift values. We employ a 2-nd order Adams-like solver to collect the performance. Allocating more computation at noisy steps significantly improves the performance." + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 436, + 361, + 552, + 451 + ], + "blocks": [ + { + "bbox": [ + 436, + 361, + 552, + 451 + ], + "lines": [ + { + "bbox": [ + 436, + 361, + 552, + 451 + ], + "spans": [ + { + "bbox": [ + 436, + 361, + 552, + 451 + ], + "type": "image", + "image_path": "f15e35020f438296ec0f20c07856070badcae024af68719216a172947aa89de8.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 511, + 554, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 511, + 554, + 535 + ], + "spans": [ + { + "bbox": [ + 313, + 511, + 554, + 535 + ], + "type": "text", + "content": "ably. In this framework, " + }, + { + "bbox": [ + 313, + 511, + 554, + 535 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 313, + 511, + 554, + 535 + ], + "type": "text", + "content": " corresponds to the pure noise timestep." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 313, + 539, + 555, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 539, + 555, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 539, + 555, + 658 + ], + "type": "text", + "content": "As illustrated in Fig. 3, diffusion models perform autoregressive refinement on spectral components [11, 37]. The diffusion transformer encodes the noisy latent to capture lower-frequency semantics before decoding higher-frequency details. However, this semantics encoding process inevitably attenuates high-frequency information, creating an optimization dilemma. This observation motivates our proposal to decouple the conventional decode-only diffusion transformer into an explicit encoder-decoder architecture." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "type": "text", + "content": "Lemma 1. 
For a linear flow-matching noise scheduler at timestep " + }, + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "type": "text", + "content": ", let us denote " + }, + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "type": "inline_equation", + "content": "K_{\\text{freq}}" + }, + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "type": "text", + "content": " as the maximum frequency of the clean data " + }, + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_{\\text{data}}" + }, + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "type": "text", + "content": ". The maximum retained frequency" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 167, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 167, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 167, + 83 + ], + "type": "text", + "content": "in the noisy latent satisfies:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 95, + 91, + 295, + 125 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 91, + 295, + 125 + ], + "spans": [ + { + "bbox": [ + 95, + 91, + 295, + 125 + ], + "type": "interline_equation", + "content": "f_{\\max}(t) > \\min \\left( \\left( \\frac{t}{1 - t} \\right)^{2}, K_{\\text{freq}} \\right). \\tag{1}", + "image_path": "dca77a27d116c6cd23d2f0918415991d238bd764976495334f8c2f228ce3bb09.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "spans": [ + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "text", + "content": "Lemma 1 is directly borrowed from [11, 37]; we place the proof of Lemma 1 in the Appendix. According to Lemma 1, as " + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "text", + "content": " increases to less noisy timesteps, semantic encoding becomes easier (due to noise reduction) while decoding complexity increases (as residual frequencies grow). 
Consider the worst-case scenario at denoising step " + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "text", + "content": ": the diffusion transformer encodes frequencies up to " + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "inline_equation", + "content": "f_{max}(t)" + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "text", + "content": "; to progress to step " + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "text", + "content": ", it must decode a residual frequency of at least " + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "inline_equation", + "content": "f_{max}(s) - f_{max}(t)" + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "text", + "content": ". Failure to decode these residual frequencies at step " + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 133, + 296, + 348 + ], + "type": "text", + "content": " creates a critical bottleneck for progression to subsequent steps. From this perspective, if allocating more of the computation to noisier timesteps leads to an improvement, it means that diffusion transformers struggle with encoding lower frequencies to provide semantics. Otherwise, if allocating more of the computation to less noisy timesteps leads to an improvement, it means that flow-matching transformers struggle with decoding higher frequencies to provide fine details." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 348, + 296, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 348, + 296, + 502 + ], + "spans": [ + { + "bbox": [ + 55, + 348, + 296, + 502 + ], + "type": "text", + "content": "To figure out the bottlenecks of current diffusion models, we conducted a targeted experiment using SiT-XL/2 with a second-order Adams-like linear multistep solver. As shown in Fig. 4, by varying the time-shift values, we demonstrate that allocating more computation to early timesteps improves final performance compared to uniform scheduling. This reveals that diffusion models face challenges at the noisier steps. This leads to a key conclusion: current diffusion transformers are fundamentally constrained by their low-frequency semantic encoding capacity. This insight motivates the exploration of encoder-decoder architectures with strategic encoder parameter allocation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 503, + 296, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 503, + 296, + 588 + ], + "spans": [ + { + "bbox": [ + 55, + 503, + 296, + 588 + ], + "type": "text", + "content": "Prior research further supports this perspective. While lightweight diffusion MLP heads demonstrate limited decoding capacity, MAR [28] overcomes this limitation through semantic latents produced by its masked backbones, enabling high-quality image generation. Similarly, REPA [52] enhances low-frequency encoding through alignment with pre-trained vision foundations [35]." + } + ] + } + ], + "index": 4 + },
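Before moving on to the method, Lemma 1's bound is easy to sanity-check numerically. The sketch below is our own illustration, not code from the paper; the cutoff `K_FREQ` and the sampled timesteps are arbitrary assumptions.

```python
# Numeric sanity check of the retained-frequency bound in Lemma 1:
# f_max(t) > min((t / (1 - t))^2, K_freq), with t = 0 the pure-noise end.
K_FREQ = 256.0  # assumed maximum frequency of the clean data x_data

def retained_frequency_bound(t: float, k_freq: float = K_FREQ) -> float:
    """Lower bound on the maximum frequency kept in the noisy latent at time t."""
    return min((t / (1.0 - t)) ** 2, k_freq)

for t in (0.1, 0.3, 0.5, 0.7, 0.9, 0.95):
    print(f"t = {t:4.2f} -> bound {retained_frequency_bound(t):8.2f}")
# The bound stays tiny until mid-trajectory and then explodes toward K_freq:
# early (noisy) steps only need low-frequency semantics, while most of the
# high-frequency content must be decoded late in the reverse process.
```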
Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 617, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 296, + 715 + ], + "type": "text", + "content": "Our decoupled diffusion transformer architecture comprises a condition encoder and a velocity decoder. The condition encoder extracted the low-frequency component from noisy input, class label, and timestep to serve as a self-condition for the velocity decoder; the velocity decoder processed the noisy latent with the self-condition to regress the high-frequency velocity. We train this model using the established linear flow diffusion framework. For brevity," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 555, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 96 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 96 + ], + "type": "text", + "content": "we designate our model as DDT (Decoupled Diffusion Transformer)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 103, + 426, + 114 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 103, + 426, + 114 + ], + "spans": [ + { + "bbox": [ + 313, + 103, + 426, + 114 + ], + "type": "text", + "content": "4.1. Condition Encoder" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "spans": [ + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "type": "text", + "content": "The condition encoder mirrors the architectural design and input structure of DiT/SiT with improved micro-design. It is built with interleaved Attention and FFN blocks, without long residual connections. The encoder processes three inputs, the noisy latent " + }, + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "type": "text", + "content": ", timestep " + }, + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "type": "text", + "content": ", and class label " + }, + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "type": "text", + "content": " to extract the self-condition feature " + }, + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_t" + }, + { + "bbox": [ + 313, + 121, + 555, + 204 + ], + "type": "text", + "content": " through a series of stacked Attention and FFN blocks:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 382, + 213, + 555, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 382, + 213, + 555, + 228 + ], + "spans": [ + { + "bbox": [ + 382, + 213, + 555, + 228 + ], + "type": "interline_equation", + "content": "\\boldsymbol {z} _ {t} = \\operatorname {E n c o d e r} \\left(\\boldsymbol {x} _ {t}, t, y\\right). 
\\tag {2}", + "image_path": "efd309c3c3c45324b330f07bb4f59d04611226fa5e53057263dcb2b5a8828738.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "spans": [ + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "text", + "content": "Specifically, the noisy latent " + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "text", + "content": " are patched into continuous tokens and then fed to extract the self-condition " + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_t" + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "text", + "content": " with aforementioned encoder blocks. The timestep " + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "text", + "content": " and class label " + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "text", + "content": " serve as external-conditioning information projected into embedding. These external-condition embeddings are progressively injected into the encoded features of " + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 313, + 235, + 554, + 317 + ], + "type": "text", + "content": " using AdaLN-Zero[36] within each encoder block." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": "To maintain local consistency of " + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": " across adjacent timesteps, we adopt the representation alignment technique from REPA [52]. Shown in Eq. (3), this method aligns the intermediate feature " + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_i" + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": " from the " + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": "-th layer in the self-mapping encoder with the DINOV2 representation " + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "inline_equation", + "content": "r_*" + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": ". 
+ { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": "To maintain local consistency of " + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": " across adjacent timesteps, we adopt the representation alignment technique from REPA [52]. As shown in Eq. (3), this method aligns the intermediate feature " + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_i" + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": " from the " + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": "-th layer in the self-mapping encoder with the DINOv2 representation " + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "inline_equation", + "content": "r_*" + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": ". Consistent with REPA [52], " + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "inline_equation", + "content": "h_{\\phi}" + }, + { + "bbox": [ + 313, + 319, + 554, + 400 + ], + "type": "text", + "content": " is the learnable projection MLP:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 375, + 402, + 553, + 416 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 402, + 553, + 416 + ], + "spans": [ + { + "bbox": [ + 375, + 402, + 553, + 416 + ], + "type": "interline_equation", + "content": "\\mathcal{L}_{enc} = 1 - \\cos\\left(r_{*}, h_{\\phi}\\left(\\mathbf{h}_{i}\\right)\\right). \\tag{3}", + "image_path": "719e4e21f1111305b86c6e48372ff1039979d4964464cd509e89d47859b11164.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 419, + 554, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 419, + 554, + 502 + ], + "spans": [ + { + "bbox": [ + 313, + 419, + 554, + 502 + ], + "type": "text", + "content": "This simple regularization accelerates training convergence, as shown in REPA [52], and facilitates local consistency of " + }, + { + "bbox": [ + 313, + 419, + 554, + 502 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_t" + }, + { + "bbox": [ + 313, + 419, + 554, + 502 + ], + "type": "text", + "content": " between adjacent steps. It allows sharing the self-condition " + }, + { + "bbox": [ + 313, + 419, + 554, + 502 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_t" + }, + { + "bbox": [ + 313, + 419, + 554, + 502 + ], + "type": "text", + "content": " produced by the encoder between adjacent steps. Our experiments demonstrate that this encoder-sharing strategy significantly enhances inference efficiency with only negligible performance degradation." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 503, + 554, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 503, + 554, + 527 + ], + "spans": [ + { + "bbox": [ + 313, + 503, + 554, + 527 + ], + "type": "text", + "content": "Additionally, the encoder also receives indirect supervision from the decoder, which we elaborate on later." + } + ] + } + ], + "index": 15 + },
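Eq. (3) amounts to a per-token cosine loss between a projected encoder feature and a frozen teacher token. A minimal sketch, assuming illustrative widths and a random stand-in for the DINOv2 tokens:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Sketch of the representation-alignment loss in Eq. (3), following our
# reading of REPA [52]. Dimensions and the MLP shape are assumptions.
enc_dim, dino_dim = 64, 768
h_phi = nn.Sequential(  # learnable projection MLP h_phi
    nn.Linear(enc_dim, 256), nn.SiLU(), nn.Linear(256, dino_dim))

def alignment_loss(h_i: torch.Tensor, r_star: torch.Tensor) -> torch.Tensor:
    """L_enc = 1 - cos(r_*, h_phi(h_i)), averaged over tokens and batch."""
    cos = F.cosine_similarity(h_phi(h_i), r_star, dim=-1)  # (B, T)
    return (1.0 - cos).mean()

h_i = torch.randn(2, 16, enc_dim)      # intermediate encoder feature
r_star = torch.randn(2, 16, dino_dim)  # frozen teacher tokens (stand-in)
print(alignment_loss(h_i, r_star))     # scalar; gradients reach h_phi and h_i
```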
+ { + "bbox": [ + 313, + 534, + 416, + 546 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 534, + 416, + 546 + ], + "spans": [ + { + "bbox": [ + 313, + 534, + 416, + 546 + ], + "type": "text", + "content": "4.2. Velocity Decoder" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "text", + "content": "The velocity decoder adopts the same architectural design as the condition encoder and consists of several stacked interleaved Attention and FFN blocks, akin to DiT/SiT. It takes the noisy latent " + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "text", + "content": ", timestep " + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "text", + "content": ", and self-conditioning " + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_t" + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "text", + "content": " as inputs to estimate the velocity " + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "inline_equation", + "content": "\\boldsymbol{v}_t" + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "text", + "content": ". Unlike the encoder, we assume that class label information is already embedded within " + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_t" + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "text", + "content": ". Thus, only the external-condition timestep " + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "text", + "content": " and self-condition feature " + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_t" + }, + { + "bbox": [ + 313, + 552, + 555, + 658 + ], + "type": "text", + "content": " are used as condition inputs for the decoder blocks:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 381, + 668, + 553, + 681 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 668, + 553, + 681 + ], + "spans": [ + { + "bbox": [ + 381, + 668, + 553, + 681 + ], + "type": "interline_equation", + "content": "\\boldsymbol{v}_{t} = \\mathbf{Decoder}\\left(\\boldsymbol{x}_{t}, t, \\boldsymbol{z}_{t}\\right). \\tag{4}", + "image_path": "825c285a9b88903c4b94ff2f427c70b668d7df96ef75f9c04693bd0bf53675c7.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "type": "text", + "content": "As demonstrated previously, to further improve the consistency of the self-condition " + }, + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "type": "text", + "content": " between adjacent steps, we employ" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 297, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 297, + 109 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 297, + 109 + ], + "type": "text", + "content": "AdaLN-Zero [36] to inject " + }, + { + "bbox": [ + 55, + 72, + 297, + 109 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t" + }, + { + "bbox": [ + 55, + 72, + 297, + 109 + ], + "type": "text", + "content": " into the decoder feature. The decoder is trained with the flow matching loss as shown in Eq. (5):" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 95, + 116, + 295, + 144 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 116, + 295, + 144 + ], + "spans": [ + { + "bbox": [ + 95, + 116, + 295, + 144 + ], + "type": "interline_equation", + "content": "\\mathcal{L}_{dec} = \\mathbb{E}\\left[ \\int_{0}^{1} \\left| \\left(\\boldsymbol{x}_{data} - \\epsilon\\right) - \\boldsymbol{v}_{t} \\right|^{2} \\mathrm{d}t \\right]. \\tag{5}", + "image_path": "2e125489df63d1dba622d543f1d89100bb70834641556935ea63e93e2a42a6ee.jpg" + } + ] + } + ], + "index": 1 + },
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "spans": [ + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": "Formally, given total inference steps " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": " and encoder computation bugets " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": ", thus the sharing ratio is " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "1 - \\frac{K}{N}" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": ", we define " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "|\\Phi| = K" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": " as the set of timesteps where the self-condition is recalculated, as shown in Equation 6. If the current timestep " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": " is not in " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": ", we reuse the previously computed " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "z_{t - \\Delta t}" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "z_t" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": ". Otherwise, we recompute " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "z_t" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": " using the encoder and the current noisy latent " + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "inline_equation", + "content": "x_t" + }, + { + "bbox": [ + 55, + 239, + 296, + 323 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 97, + 331, + 295, + 364 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 331, + 295, + 364 + ], + "spans": [ + { + "bbox": [ + 97, + 331, + 295, + 364 + ], + "type": "interline_equation", + "content": "\\boldsymbol {z} _ {t} = \\left\\{ \\begin{array}{l l} \\boldsymbol {z} _ {t - \\Delta t}, & \\text {i f} t \\notin \\Phi \\\\ \\mathbf {E n c o d e r} \\left(\\boldsymbol {x} _ {t}, t, y\\right), & \\text {i f} t \\in \\Phi \\end{array} \\right. 
\tag {6}", "image_path": "bf81283f9948fab0936f38300930de61eaaa0b6497ca1e53d2e49a3d22d619af.jpg" } ] } ], "index": 5 },
{ "bbox": [ 55, 376, 296, 509 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 55, 376, 296, 509 ], "spans": [
{ "bbox": [ 55, 376, 296, 509 ], "type": "text", "content": "Uniform Encoder Sharing. This naive approach recalculates the self-condition " },
{ "bbox": [ 55, 376, 296, 509 ], "type": "inline_equation", "content": "z_{t}" },
{ "bbox": [ 55, 376, 296, 509 ], "type": "text", "content": " every " },
{ "bbox": [ 55, 376, 296, 509 ], "type": "inline_equation", "content": "\frac{N}{K}" },
{ "bbox": [ 55, 376, 296, 509 ], "type": "text", "content": " steps. Previous work, such as DeepCache [33], uses this naive handcrafted uniform " },
{ "bbox": [ 55, 376, 296, 509 ], "type": "inline_equation", "content": "\Phi" },
{ "bbox": [ 55, 376, 296, 509 ], "type": "text", "content": " set to accelerate UNet models. However, UNet models, trained solely with a denoising loss and lacking robust representation alignment, exhibit less regularized local consistency in deeper features across adjacent steps than our DDT model. We therefore also propose a simple statistic dynamic programming algorithm to construct " },
{ "bbox": [ 55, 376, 296, 509 ], "type": "inline_equation", "content": "\Phi" },
{ "bbox": [ 55, 376, 296, 509 ], "type": "text", "content": ". Unlike the naive approach [33], our statistic dynamic programming recovers the globally optimal " },
{ "bbox": [ 55, 376, 296, 509 ], "type": "inline_equation", "content": "\Phi" },
{ "bbox": [ 55, 376, 296, 509 ], "type": "text", "content": " set." } ] } ], "index": 6 },
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 55, 521, 296, 633 ], "spans": [
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "content": "Statistic Dynamic Programming. We construct the statistic similarity matrix " },
{ "bbox": [ 55, 521, 296, 633 ], "type": "inline_equation", "content": "\mathbf{S} \in R^{N \times N}" },
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "content": " of " },
{ "bbox": [ 55, 521, 296, 633 ], "type": "inline_equation", "content": "z_{t}" },
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "content": " across different steps using cosine distance. The optimal " },
{ "bbox": [ 55, 521, 296, 633 ], "type": "inline_equation", "content": "\Phi" },
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "content": " set is the one whose total similarity cost " },
{ "bbox": [ 55, 521, 296, 633 ], "type": "inline_equation", "content": "-\sum_{k}^{K} \sum_{i = \Phi_{k}}^{\Phi_{k + 1}} S[\Phi_{k}, i]" },
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "content": " is globally minimal. This is a classic minimal-sum-path problem and can be solved by dynamic programming. As shown in Eqs. 
(7) and (8), we denote " },
{ "bbox": [ 55, 521, 296, 633 ], "type": "inline_equation", "content": "\mathbf{C}_{i}^{k}" },
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "content": " as the cost and " },
{ "bbox": [ 55, 521, 296, 633 ], "type": "inline_equation", "content": "\mathbf{P}_{i}^{k}" },
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "content": " as the traced path when " },
{ "bbox": [ 55, 521, 296, 633 ], "type": "inline_equation", "content": "\Phi_{k} = i" },
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "content": ". The state transition function from " },
{ "bbox": [ 55, 521, 296, 633 ], "type": "inline_equation", "content": "\mathbf{C}_{j}^{k - 1}" },
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "content": " to " },
{ "bbox": [ 55, 521, 296, 633 ], "type": "inline_equation", "content": "\mathbf{C}_{i}^{k}" },
{ "bbox": [ 55, 521, 296, 633 ], "type": "text", "content": " follows:" } ] } ], "index": 7 },
{ "bbox": [ 91, 641, 295, 666 ], "type": "interline_equation", "angle": 0, "lines": [ { "bbox": [ 91, 641, 295, 666 ], "spans": [
{ "bbox": [ 91, 641, 295, 666 ], "type": "interline_equation", "content": "\mathbf{C}_{i}^{k} = \min_{j=0}^{i} \left\{ \mathbf{C}_{j}^{k-1} - \Sigma_{l=j}^{i} \mathbf{S}[j, l] \right\}. \tag{7}", "image_path": "874ff7454acdf2c556cf1b558751acd1189acdfb76765ffe3a13291cc381daac0.jpg" } ] } ], "index": 8 },
{ "bbox": [ 93, 666, 295, 683 ], "type": "interline_equation", "angle": 0, "lines": [ { "bbox": [ 93, 666, 295, 683 ], "spans": [
{ "bbox": [ 93, 666, 295, 683 ], "type": "interline_equation", "content": "\mathbf{P}_{i}^{k} = \operatorname{argmin}_{j=0}^{i} \left\{ \mathbf{C}_{j}^{k-1} - \Sigma_{l=j}^{i} \mathbf{S}[j, l] \right\}. \tag{8}", "image_path": "3e79e639e13e47101e33d977d48cadf8bbff9a2854bc79f59e8e311cf8310e95.jpg" } ] } ], "index": 9 },
{ "bbox": [ 55, 689, 295, 715 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 55, 689, 295, 715 ], "spans": [
{ "bbox": [ 55, 689, 295, 715 ], "type": "text", "content": "After obtaining the cost matrix " },
{ "bbox": [ 55, 689, 295, 715 ], "type": "inline_equation", "content": "\mathbf{C}" },
{ "bbox": [ 55, 689, 295, 715 ], "type": "text", "content": " and tracked path " },
{ "bbox": [ 55, 689, 295, 715 ], "type": "inline_equation", "content": "\mathbf{P}" },
{ "bbox": [ 55, 689, 295, 715 ], "type": "text", "content": ", the optimal " },
{ "bbox": [ 55, 689, 295, 715 ], "type": "inline_equation", "content": "\Phi" },
{ "bbox": [ 55, 689, 295, 715 ], "type": "text", "content": " is recovered by backtracking " },
{ "bbox": [ 55, 689, 295, 715 ], "type": "inline_equation", "content": "\mathbf{P}" },
{ "bbox": [ 55, 689, 295, 715 ], "type": "text", "content": " from " },
{ "bbox": [ 55, 689, 295, 715 ], "type": "inline_equation", "content": "\mathbf{P}_N^K" },
{ "bbox": [ 55, 689, 295, 715 ], "type": "text", "content": "."
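[Editor's illustration] The recurrence in Eqs. (7)-(8) and the final backtracking admit a short brute-force implementation. The sketch below is ours, under stated assumptions (NumPy, a precomputed similarity matrix S, and the first recompute forced to step 0); the function name and boundary conventions are hypothetical, not the authors' released code.

```python
import numpy as np

def plan_recompute_steps(S: np.ndarray, K: int) -> list:
    """Pick the K step indices where z_t is recomputed (the set Phi).

    S is the N x N similarity matrix of the self-condition between steps.
    C[k, i] is the minimal cost when the k-th recompute lands on step i
    (Eq. (7)); P[k, i] stores the previous recompute step (Eq. (8)).
    N is at most a few hundred sampling steps, so the cubic loop is cheap.
    """
    N = S.shape[0]
    C = np.full((K, N), np.inf)
    P = np.zeros((K, N), dtype=int)
    C[0, 0] = -S[0, 0]  # assumption: the first recompute is step 0
    for k in range(1, K):
        for i in range(k, N):
            for j in range(k - 1, i):
                # Reusing z_j for steps j..i costs the negated summed
                # similarity between z_j and every covered step.
                cost = C[k - 1, j] - S[j, j:i + 1].sum()
                if cost < C[k, i]:
                    C[k, i], P[k, i] = cost, j
    # Backtrack the optimal path from the best final recompute position.
    phi = [int(np.argmin(C[K - 1]))]
    for k in range(K - 1, 0, -1):
        phi.append(int(P[k, phi[-1]]))
    return sorted(phi)
```

At sampling time, z_t is then recomputed only when the current step index falls in the returned set, exactly as in Eq. (6).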
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 71, + 391, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 71, + 391, + 85 + ], + "spans": [ + { + "bbox": [ + 314, + 71, + 391, + 85 + ], + "type": "text", + "content": "5. Experiment" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "text", + "content": "We conduct experiments on 256x256 ImageNet datasets. The total training batch size is set to 256. Consistent with methodological approaches such as SiT [32], DiT [36], and REPA [52], we employed the Adam optimizer with a constant learning rate of 0.0001 throughout the entire training process. To ensure a fair comparative analysis, we did not use gradient clipping and learning rate warm-up techniques. Our default training infrastructure consisted of " + }, + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "inline_equation", + "content": "16 \\times" + }, + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "inline_equation", + "content": "8 \\times" + }, + { + "bbox": [ + 313, + 91, + 555, + 247 + ], + "type": "text", + "content": " A100 GPUs. For sampling, we take the Euler solver with 250 steps as the default choice. As for the VAE, we take the off-shelf VAE-ft-EMA with a downsample factor of 8 from Huggingface1. We report FID [18], sFID [34], IS [39], Precision and Recall [25]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 252, + 428, + 265 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 252, + 428, + 265 + ], + "spans": [ + { + "bbox": [ + 313, + 252, + 428, + 265 + ], + "type": "text", + "content": "5.1. Improved baselines" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 270, + 555, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 270, + 555, + 437 + ], + "spans": [ + { + "bbox": [ + 313, + 270, + 555, + 437 + ], + "type": "text", + "content": "Recent architectural improvements such as SwiGLU [46, 47], RoPE [42], and RMSNorm [46, 47] have been extensively validated in the research community [8, 31, 50]. Additionally, lognorm sampling [12] has demonstrated significant benefits for training convergence. Consequently, we developed improved baseline models by incorporating these advanced techniques, drawing inspiration from recent works in the field. The performance of these improved baselines is comprehensively provided in Tab. 2. To validate the reliability of our implementation, we also reproduced the results for REPA-B/2, achieving metrics that marginally exceed those originally reported in the REPA[52]. These reproduction results provide additional confidence in the robustness of our approach." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 437, + 554, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 437, + 554, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 437, + 554, + 510 + ], + "type": "text", + "content": "The improved baselines in our Tab. 2 consistently outperform their predecessors without REPA. However, upon implementing REPA, performance rapidly approaches a saturation point. This is particularly evident in the XL model size, where incremental technique improvements yield diminishingly small gains." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 514, + 495, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 495, + 528 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 495, + 528 + ], + "type": "text", + "content": "5.2. Metric comparison with baselines" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 532, + 555, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 532, + 555, + 688 + ], + "spans": [ + { + "bbox": [ + 313, + 532, + 555, + 688 + ], + "type": "text", + "content": "We present the performances of different-size models at 400K training steps in Tab. 2. Our diffusion encoder-decoder transformer(DDT) family demonstrates consistent and significant improvements across various model sizes. Our DDT-B/2(8En4De) model exceeds Improved-REPA-B/2 by 2.8 FID gains. Our DDT-XL/2(22En6De) exceeds REPA-XL/2 by 1.3 FID gains. While the decoder-only diffusion transformers approach performance saturation with REPA[52], our DDT models continue to deliver superior results. The incremental technique improvements show diminishing gains, particularly in larger model sizes. However, our DDT models maintain a significant performance advantage, underscoring the effectiveness of our approach." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 313, + 693, + 553, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 693, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 693, + 553, + 712 + ], + "type": "text", + "content": "1https://huggingface.co/stabilityai/sd-vae-ft-ema" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 85, + 70, + 523, + 334 + ], + "blocks": [ + { + "bbox": [ + 85, + 70, + 523, + 334 + ], + "lines": [ + { + "bbox": [ + 85, + 70, + 523, + 334 + ], + "spans": [ + { + "bbox": [ + 85, + 70, + 523, + 334 + ], + "type": "table", + "html": "
<tr><td></td><td>Params</td><td>Epochs</td><td colspan="4">256×256, w/o CFG</td><td colspan="4">256×256, w/ CFG</td></tr>
<tr><td></td><td></td><td></td><td>FID↓</td><td>IS↑</td><td>Pre.↑</td><td>Rec.↑</td><td>FID↓</td><td>IS↑</td><td>Pre.↑</td><td>Rec.↑</td></tr>
<tr><td>MAR-B [28]</td><td>208M</td><td>800</td><td>3.48</td><td>192.4</td><td>0.78</td><td>0.58</td><td>2.31</td><td>281.7</td><td>0.82</td><td>0.57</td></tr>
<tr><td>CausalFusion [9]</td><td>368M</td><td>800</td><td>5.12</td><td>166.1</td><td>0.73</td><td>0.66</td><td>1.94</td><td>264.4</td><td>0.82</td><td>0.59</td></tr>
<tr><td>LDM-4 [38]</td><td>400M</td><td>170</td><td>10.56</td><td>103.5</td><td>0.71</td><td>0.62</td><td>3.6</td><td>247.7</td><td>0.87</td><td>0.48</td></tr>
<tr><td>DDT-L (Ours)</td><td>458M</td><td>80</td><td>7.98</td><td>128.1</td><td>0.68</td><td>0.67</td><td>1.64</td><td>310.5</td><td>0.81</td><td>0.61</td></tr>
<tr><td>MAR-L [28]</td><td>479M</td><td>800</td><td>2.6</td><td>221.4</td><td>0.79</td><td>0.60</td><td>1.78</td><td>296.0</td><td>0.81</td><td>0.60</td></tr>
<tr><td>VAVAE [50]</td><td>675M</td><td>800</td><td>2.17</td><td>205.6</td><td>0.77</td><td>0.65</td><td>1.35</td><td>295.3</td><td>0.79</td><td>0.65</td></tr>
<tr><td>CausalFusion [9]</td><td>676M</td><td>800</td><td>3.61</td><td>180.9</td><td>0.75</td><td>0.66</td><td>1.77</td><td>282.3</td><td>0.82</td><td>0.61</td></tr>
<tr><td>ADM [10]</td><td>554M</td><td>400</td><td>10.94</td><td>-</td><td>0.69</td><td>0.63</td><td>4.59</td><td>186.7</td><td>0.82</td><td>0.52</td></tr>
<tr><td>DiT-XL [36]</td><td>675M</td><td>1400</td><td>9.62</td><td>121.5</td><td>0.67</td><td>0.67</td><td>2.27</td><td>278.2</td><td>0.83</td><td>0.57</td></tr>
<tr><td>SiT-XL [32]</td><td>675M</td><td>1400</td><td>8.3</td><td>-</td><td>-</td><td>-</td><td>2.06</td><td>270.3</td><td>0.82</td><td>0.59</td></tr>
<tr><td>ViT-XL [16]</td><td>451M</td><td>400</td><td>8.10</td><td>-</td><td>-</td><td>-</td><td>2.06</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>U-ViT-H/2 [2]</td><td>501M</td><td>400</td><td>6.58</td><td>-</td><td>-</td><td>-</td><td>2.29</td><td>263.9</td><td>0.82</td><td>0.57</td></tr>
<tr><td>MaskDiT [14]</td><td>675M</td><td>1600</td><td>5.69</td><td>178.0</td><td>0.74</td><td>0.60</td><td>2.28</td><td>276.6</td><td>0.80</td><td>0.61</td></tr>
<tr><td>FlowDCN [48]</td><td>618M</td><td>400</td><td>8.36</td><td>122.5</td><td>0.69</td><td>0.65</td><td>2.00</td><td>263.1</td><td>0.82</td><td>0.58</td></tr>
<tr><td>RDM [44]</td><td>553M</td><td>/</td><td>5.27</td><td>153.4</td><td>0.75</td><td>0.62</td><td>1.99</td><td>260.4</td><td>0.81</td><td>0.58</td></tr>
<tr><td>REPA [52]</td><td>675M</td><td>800</td><td>5.9</td><td>157.8</td><td>0.70</td><td>0.69</td><td>1.42</td><td>305.7</td><td>0.80</td><td>0.64</td></tr>
<tr><td>DDT-XL (Ours)</td><td>675M</td><td>80</td><td>6.62</td><td>135.2</td><td>0.69</td><td>0.67</td><td>1.52</td><td>263.7</td><td>0.78</td><td>0.63</td></tr>
<tr><td>DDT-XL (Ours)</td><td>675M</td><td>256</td><td>6.30</td><td>146.7</td><td>0.68</td><td>0.68</td><td>1.31</td><td>308.1</td><td>0.78</td><td>0.62</td></tr>
<tr><td>DDT-XL (Ours)</td><td>675M</td><td>400</td><td>6.27</td><td>154.7</td><td>0.68</td><td>0.69</td><td>1.26</td><td>310.6</td><td>0.79</td><td>0.65</td></tr>
", + "image_path": "f252b04531ac99c22135065ab64a30e768c4a30b5909c3b088d0a7287ae98fa6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 66, + 368, + 287, + 594 + ], + "blocks": [ + { + "bbox": [ + 55, + 335, + 555, + 360 + ], + "lines": [ + { + "bbox": [ + 55, + 335, + 555, + 360 + ], + "spans": [ + { + "bbox": [ + 55, + 335, + 555, + 360 + ], + "type": "text", + "content": "Table 1. System performance comparison on ImageNet " + }, + { + "bbox": [ + 55, + 335, + 555, + 360 + ], + "type": "inline_equation", + "content": "{256} \\times {256}" + }, + { + "bbox": [ + 55, + 335, + 555, + 360 + ], + "type": "text", + "content": " class-conditioned generation. Gray blocks mean the algorithm uses VAE trained or fine-tuned on ImageNet instead of the off-shelf SD-VAE-f8d4-ft-ema." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 66, + 368, + 287, + 594 + ], + "lines": [ + { + "bbox": [ + 66, + 368, + 287, + 594 + ], + "spans": [ + { + "bbox": [ + 66, + 368, + 287, + 594 + ], + "type": "table", + "html": "
<tr><td>Model</td><td>FID↓</td><td>sFID↓</td><td>IS↑</td><td>Prec.↑</td><td>Rec.↑</td></tr>
<tr><td>SiT-B/2 [32]</td><td>33.0</td><td>6.46</td><td>43.7</td><td>0.53</td><td>0.63</td></tr>
<tr><td>REPA-B/2 [52]</td><td>24.4</td><td>6.40</td><td>59.9</td><td>0.59</td><td>0.65</td></tr>
<tr><td>REPA-B/2 (Reproduced)</td><td>22.2</td><td>7.50</td><td>69.1</td><td>0.59</td><td>0.65</td></tr>
<tr><td>DDT-B/2† (8En4De)</td><td>21.1</td><td>7.81</td><td>73.0</td><td>0.60</td><td>0.65</td></tr>
<tr><td>Improved-SiT-B/2</td><td>25.1</td><td>6.54</td><td>58.8</td><td>0.57</td><td>0.64</td></tr>
<tr><td>Improved-REPA-B/2</td><td>19.1</td><td>6.88</td><td>76.49</td><td>0.60</td><td>0.66</td></tr>
<tr><td>DDT-B/2 (8En4De)</td><td>16.32</td><td>6.63</td><td>86.0</td><td>0.62</td><td>0.66</td></tr>
<tr><td>SiT-L/2 [32]</td><td>18.8</td><td>5.29</td><td>72.0</td><td>0.64</td><td>0.64</td></tr>
<tr><td>REPA-L/2 [52]</td><td>10.0</td><td>5.20</td><td>109.2</td><td>0.69</td><td>0.65</td></tr>
<tr><td>Improved-SiT-L/2</td><td>12.7</td><td>5.48</td><td>95.7</td><td>0.65</td><td>0.65</td></tr>
<tr><td>Improved-REPA-L/2</td><td>9.3</td><td>5.44</td><td>116.6</td><td>0.67</td><td>0.66</td></tr>
<tr><td>DDT-L/2 (20En4De)</td><td>7.98</td><td>5.50</td><td>128.1</td><td>0.68</td><td>0.67</td></tr>
<tr><td>SiT-XL/2 [32]</td><td>17.2</td><td>5.07</td><td>76.52</td><td>0.65</td><td>0.63</td></tr>
<tr><td>REPA-XL/2 [52]</td><td>7.9</td><td>5.06</td><td>122.6</td><td>0.70</td><td>0.65</td></tr>
<tr><td>Improved-SiT-XL/2</td><td>10.9</td><td>5.3</td><td>103.4</td><td>0.66</td><td>0.65</td></tr>
<tr><td>Improved-REPA-XL/2</td><td>8.14</td><td>5.34</td><td>124.9</td><td>0.68</td><td>0.67</td></tr>
<tr><td>DDT-XL/2 (22En6De)</td><td>6.62</td><td>4.86</td><td>135.1</td><td>0.69</td><td>0.67</td></tr>
", + "image_path": "38214edebabe768045a7736f9e2fe165033a6bea65d8cf66547cf0b5ea3afac5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 602, + 295, + 691 + ], + "lines": [ + { + "bbox": [ + 55, + 602, + 295, + 691 + ], + "spans": [ + { + "bbox": [ + 55, + 602, + 295, + 691 + ], + "type": "text", + "content": "Table 2. Metrics of " + }, + { + "bbox": [ + 55, + 602, + 295, + 691 + ], + "type": "inline_equation", + "content": "400K" + }, + { + "bbox": [ + 55, + 602, + 295, + 691 + ], + "type": "text", + "content": " training steps with different model sizes. All results are reported without classifier-free guidance. gray means metrics are copied from the original paper, otherwise it is produced by our codebase. By default, our DDT models are built on improved baselines. " + }, + { + "bbox": [ + 55, + 602, + 295, + 691 + ], + "type": "inline_equation", + "content": "\\mathrm{DDT}^{\\dagger}" + }, + { + "bbox": [ + 55, + 602, + 295, + 691 + ], + "type": "text", + "content": " means model built on naive baseline without architecture improvement and lognorm sampling, consistent to REPA. Our DDT models consistently outperformed their counterparts." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 370, + 455, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 370, + 455, + 384 + ], + "spans": [ + { + "bbox": [ + 313, + 370, + 455, + 384 + ], + "type": "text", + "content": "5.3. System level comparison" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 388, + 555, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 388, + 555, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 388, + 555, + 604 + ], + "type": "text", + "content": "ImageNet " + }, + { + "bbox": [ + 313, + 388, + 555, + 604 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 388, + 555, + 604 + ], + "type": "text", + "content": ". We report the final metrics of DDT-XL/2 (22En6De) and DDT-L/2 (20En4De) at Tab. 1. Our DDT models demonstrate exceptional efficiency, achieving convergence in approximately " + }, + { + "bbox": [ + 313, + 388, + 555, + 604 + ], + "type": "inline_equation", + "content": "\\frac{1}{4}" + }, + { + "bbox": [ + 313, + 388, + 555, + 604 + ], + "type": "text", + "content": " of the total epochs compared to REPA [52] and other diffusion transformer models. In order to maintain methodological consistency with REPA, we employed the classifier-free guidance with 2.0 in the interval [0.3, 1]. Our models delivered impressive results: DDT-L/2 achieved 1.64 FID, and DDT-XL/2 got 1.52 FID within just 80 epochs. By extending training to 256 epochs—still significantly more efficient than traditional 800-epoch approaches—our DDT-XL/2 established a new state-of-the-art benchmark of 1.31 FID on ImageNet " + }, + { + "bbox": [ + 313, + 388, + 555, + 604 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 388, + 555, + 604 + ], + "type": "text", + "content": ", decisively outperforming previous diffusion transformer methodologies. To extend training to 400 epochs, our DDT-XL/2(22En6De) achieves 1.26 FID, nearly reaching the upper limit of SD-VAE-ft-EMA-f8d4, which has a 1.20 rFID on ImageNet256." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "content": "ImageNet " + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "content": " We provide the final metrics of DDT-XL/2 at Tab. 3. To validate the superiority of our DDT model, we take our DDT-XL/2 trained on ImageNet " + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "inline_equation", + "content": "256 \\times" + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "content": " 256 under 256 epochs as the initialization, fine-tune out DDT-XL/2 on ImageNet " + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "inline_equation", + "content": "100K" + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "content": " steps. We adopt the aforementioned interval guidance [26] and we achieved a remarkable state-of-the-art performance of 1.90 FID, decisively outperforming REPA by a significant 0.28" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 61, + 70, + 291, + 213 + ], + "blocks": [ + { + "bbox": [ + 61, + 70, + 291, + 213 + ], + "lines": [ + { + "bbox": [ + 61, + 70, + 291, + 213 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 291, + 213 + ], + "type": "table", + "html": "
<tr><td colspan="6">ImageNet 512 × 512</td></tr>
<tr><td>Model</td><td>FID↓</td><td>sFID↓</td><td>IS↑</td><td>Pre.↑</td><td>Rec.↑</td></tr>
<tr><td>BigGAN-deep [3]</td><td>8.43</td><td>8.13</td><td>177.90</td><td>0.88</td><td>0.29</td></tr>
<tr><td>StyleGAN-XL [40]</td><td>2.41</td><td>4.06</td><td>267.75</td><td>0.77</td><td>0.52</td></tr>
<tr><td>ADM-G [10]</td><td>7.72</td><td>6.57</td><td>172.71</td><td>0.87</td><td>0.42</td></tr>
<tr><td>ADM-G, ADM-U</td><td>3.85</td><td>5.86</td><td>221.72</td><td>0.84</td><td>0.53</td></tr>
<tr><td>DiT-XL/2 [36]</td><td>3.04</td><td>5.02</td><td>240.82</td><td>0.84</td><td>0.54</td></tr>
<tr><td>SiT-XL/2 [32]</td><td>2.62</td><td>4.18</td><td>252.21</td><td>0.84</td><td>0.57</td></tr>
<tr><td>REPA-XL/2 [52]</td><td>2.08</td><td>4.19</td><td>274.6</td><td>0.83</td><td>0.58</td></tr>
<tr><td>FlowDCN-XL/2 [48]</td><td>2.44</td><td>4.53</td><td>252.8</td><td>0.84</td><td>0.54</td></tr>
<tr><td>DDT-XL/2 (500K)</td><td>1.28</td><td>4.22</td><td>305.1</td><td>0.80</td><td>0.63</td></tr>
", + "image_path": "c899eae3b28c35102cbbf666a0d15439efad97c88a461ea4e357d1a4523bec7a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 280, + 296, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 280, + 296, + 342 + ], + "spans": [ + { + "bbox": [ + 55, + 280, + 296, + 342 + ], + "type": "text", + "content": "performance margin. In Tab. 3, some metrics exhibit subtle degradation, we attribute this to potentially insufficient fine-tuning. When allocating more training iterations to DDT-XL/2, it achieves 1.28 FID at 500K steps with CFG3.0 within the time interval [0.3, 1.0]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 350, + 233, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 350, + 233, + 363 + ], + "spans": [ + { + "bbox": [ + 55, + 350, + 233, + 363 + ], + "type": "text", + "content": "5.4. Acceleration by Encoder sharing" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 368, + 296, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 368, + 296, + 429 + ], + "spans": [ + { + "bbox": [ + 55, + 368, + 296, + 429 + ], + "type": "text", + "content": "As illustrated in Fig. 5, there is a strong local consistency of the self-condition in our condition encoder. Even " + }, + { + "bbox": [ + 55, + 368, + 296, + 429 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_{t=0}" + }, + { + "bbox": [ + 55, + 368, + 296, + 429 + ], + "type": "text", + "content": " has a strong similarity above 0.8 with " + }, + { + "bbox": [ + 55, + 368, + 296, + 429 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_{t=1}" + }, + { + "bbox": [ + 55, + 368, + 296, + 429 + ], + "type": "text", + "content": ". This consistency provides an opportunity to speed up inference by sharing the encoder between adjacent steps." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "spans": [ + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "type": "text", + "content": "We employed the simple uniform encoder sharing strategy and the new novel statistics dynamic programming strategy. Specifically, for the uniform strategy, we only recalculate the self-condition " + }, + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "type": "text", + "content": " every " + }, + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "type": "text", + "content": " steps. For statistics dynamic programming, we solve the aforementioned minimal sum path on the similarity matrix by dynamic programming and recalculate " + }, + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "type": "text", + "content": " according to the solved strategy. As shown in Fig. 6, there is a significant inference speedup nearly without visual quality loss when " + }, + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 55, + 429, + 296, + 584 + ], + "type": "text", + "content": " is smaller than 6. As shown in Tab. 4, the metrics loss is still marginal, while the inference speedup is significant. 
The novel statistics dynamic programming slightly outperformed the naive uniform strategy with less FID drop." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 593, + 124, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 593, + 124, + 605 + ], + "spans": [ + { + "bbox": [ + 55, + 593, + 124, + 605 + ], + "type": "text", + "content": "5.5. Ablations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 611, + 295, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 611, + 295, + 672 + ], + "spans": [ + { + "bbox": [ + 55, + 611, + 295, + 672 + ], + "type": "text", + "content": "We conduct ablation studies on ImageNet " + }, + { + "bbox": [ + 55, + 611, + 295, + 672 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 611, + 295, + 672 + ], + "type": "text", + "content": " with DDT-B/2 and DDT-L/2. For sampling, we take the Euler solver with 250 steps as the default choice without classifier-free guidance. For training, we train each model with 80 epochs(400k steps), and the batch size is set to 256." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "type": "text", + "content": "Encoder-Decoder Ratio we systematically explored ratios ranging from " + }, + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "type": "inline_equation", + "content": "2:1" + }, + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "type": "inline_equation", + "content": "5:1" + }, + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "type": "text", + "content": " across different model sizes." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 317, + 70, + 553, + 220 + ], + "blocks": [ + { + "bbox": [ + 55, + 220, + 295, + 277 + ], + "lines": [ + { + "bbox": [ + 55, + 220, + 295, + 277 + ], + "spans": [ + { + "bbox": [ + 55, + 220, + 295, + 277 + ], + "type": "text", + "content": "Table 3. Benchmarking class-conditional image generation on ImageNet " + }, + { + "bbox": [ + 55, + 220, + 295, + 277 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 220, + 295, + 277 + ], + "type": "text", + "content": ". Our DDT-XL/2(512 × 512) is fine-tuned from the same model trained on " + }, + { + "bbox": [ + 55, + 220, + 295, + 277 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 220, + 295, + 277 + ], + "type": "text", + "content": " resolution setting of 1.28M steps. We adopt the interval guidance with interval [0.3, 1] and CFG of 3.0" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 70, + 553, + 220 + ], + "lines": [ + { + "bbox": [ + 317, + 70, + 553, + 220 + ], + "spans": [ + { + "bbox": [ + 317, + 70, + 553, + 220 + ], + "type": "table", + "html": "
<tr><td>Share Ratio</td><td>Acc</td><td>Φ</td><td>FID↓</td><td>sFID↓</td><td>IS↑</td><td>Prec.↑</td><td>Rec.↑</td></tr>
<tr><td>0.00</td><td>1.0×</td><td>Uniform</td><td>1.31</td><td>4.62</td><td>308.1</td><td>0.78</td><td>0.66</td></tr>
<tr><td>0.50</td><td>1.6×</td><td>Uniform</td><td>1.31</td><td>4.48</td><td>300.5</td><td>0.78</td><td>0.65</td></tr>
<tr><td>0.66</td><td>1.9×</td><td>Uniform</td><td>1.32</td><td>4.46</td><td>301.2</td><td>0.78</td><td>0.65</td></tr>
<tr><td>0.75</td><td>2.3×</td><td>Uniform</td><td>1.34</td><td>4.43</td><td>302.7</td><td>0.78</td><td>0.65</td></tr>
<tr><td>0.80</td><td>2.6×</td><td>Uniform</td><td>1.36</td><td>4.40</td><td>303.3</td><td>0.78</td><td>0.64</td></tr>
<tr><td></td><td></td><td>StatisticDP</td><td>1.33</td><td>4.37</td><td>301.7</td><td>0.78</td><td>0.64</td></tr>
<tr><td>0.83</td><td>2.7×</td><td>Uniform</td><td>1.37</td><td>4.41</td><td>302.8</td><td>0.78</td><td>0.64</td></tr>
<tr><td></td><td></td><td>StatisticDP</td><td>1.36</td><td>4.35</td><td>300.3</td><td>0.78</td><td>0.64</td></tr>
<tr><td>0.87</td><td>3.0×</td><td>Uniform</td><td>1.42</td><td>4.43</td><td>302.8</td><td>0.78</td><td>0.64</td></tr>
<tr><td></td><td></td><td>StatisticDP</td><td>1.40</td><td>4.35</td><td>302.4</td><td>0.78</td><td>0.64</td></tr>
", + "image_path": "6aeb8ad5d07d128fe9a1397a63a04c9474c9d30a1f1f63a493b7426b0b7ca35a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 227, + 555, + 283 + ], + "lines": [ + { + "bbox": [ + 313, + 227, + 555, + 283 + ], + "spans": [ + { + "bbox": [ + 313, + 227, + 555, + 283 + ], + "type": "text", + "content": "Table 4. Metrics of " + }, + { + "bbox": [ + 313, + 227, + 555, + 283 + ], + "type": "inline_equation", + "content": "{400K}" + }, + { + "bbox": [ + 313, + 227, + 555, + 283 + ], + "type": "text", + "content": " training steps with different model sizes. All results are reported without classifier-free guidance. gray means metrics are copied from the original paper, otherwise it is produced by our codebase. Our DDT models consistently outperformed its counterparts" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 323, + 290, + 556, + 479 + ], + "blocks": [ + { + "bbox": [ + 323, + 290, + 556, + 479 + ], + "lines": [ + { + "bbox": [ + 323, + 290, + 556, + 479 + ], + "spans": [ + { + "bbox": [ + 323, + 290, + 556, + 479 + ], + "type": "image", + "image_path": "033d63fcd85c9681fb31fefd5873fed342d5bf14138f45e31589702949bc1243.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 490, + 555, + 523 + ], + "lines": [ + { + "bbox": [ + 313, + 490, + 555, + 523 + ], + "spans": [ + { + "bbox": [ + 313, + 490, + 555, + 523 + ], + "type": "text", + "content": "Figure 5. The cosine similarity of self-condition feature " + }, + { + "bbox": [ + 313, + 490, + 555, + 523 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 313, + 490, + 555, + 523 + ], + "type": "text", + "content": " from encoder between different timesteps. There is a strong correlation between adjacent steps, indicating the redundancy." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 320, + 528, + 551, + 630 + ], + "blocks": [ + { + "bbox": [ + 320, + 528, + 551, + 630 + ], + "lines": [ + { + "bbox": [ + 320, + 528, + 551, + 630 + ], + "spans": [ + { + "bbox": [ + 320, + 528, + 551, + 630 + ], + "type": "image", + "image_path": "ee4c2612ac81285275068e95259e898b96d23d847186f0bc23ef8c53d733f70c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 638, + 555, + 684 + ], + "lines": [ + { + "bbox": [ + 313, + 638, + 555, + 684 + ], + "spans": [ + { + "bbox": [ + 313, + 638, + 555, + 684 + ], + "type": "text", + "content": "Figure 6. Sharing the self-condition " + }, + { + "bbox": [ + 313, + 638, + 555, + 684 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 313, + 638, + 555, + 684 + ], + "type": "text", + "content": " in adjacent steps significant speedup the inference. We tried various sharing frequency configurations. There is marginal visual quality down-gradation when the sharing frequency is reasonable." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "content": "in Fig. 7 and Fig. 8. 
Our notation " + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "inline_equation", + "content": "m\\mathrm{En}n\\mathrm{De}" + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "content": " represents models with " + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "content": " encoder layers and " + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "content": " decoder layers. The inves" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 72, + 222, + 192 + ], + "blocks": [ + { + "bbox": [ + 65, + 72, + 222, + 192 + ], + "lines": [ + { + "bbox": [ + 65, + 72, + 222, + 192 + ], + "spans": [ + { + "bbox": [ + 65, + 72, + 222, + 192 + ], + "type": "image", + "image_path": "407f5b5ff8f50c1416dcfbeafdd2fea4269a384cafb24e536a03e1635a638c3b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 228, + 72, + 386, + 192 + ], + "blocks": [ + { + "bbox": [ + 228, + 72, + 386, + 192 + ], + "lines": [ + { + "bbox": [ + 228, + 72, + 386, + 192 + ], + "spans": [ + { + "bbox": [ + 228, + 72, + 386, + 192 + ], + "type": "image", + "image_path": "2757235a519a66a5a620a7b238d4836051bbd2024c54007acd28fc92d3a5a40d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 391, + 77, + 548, + 192 + ], + "blocks": [ + { + "bbox": [ + 391, + 77, + 548, + 192 + ], + "lines": [ + { + "bbox": [ + 391, + 77, + 548, + 192 + ], + "spans": [ + { + "bbox": [ + 391, + 77, + 548, + 192 + ], + "type": "image", + "image_path": "4b8104cb4cd6afc445f9075381ba25c807552c05b2bdf48c9ed38b7d6ca8600f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 65, + 224, + 222, + 342 + ], + "blocks": [ + { + "bbox": [ + 55, + 202, + 553, + 224 + ], + "lines": [ + { + "bbox": [ + 55, + 202, + 553, + 224 + ], + "spans": [ + { + "bbox": [ + 55, + 202, + 553, + 224 + ], + "type": "text", + "content": "Figure 7. The DDT-B/2 built upon Improved-baselines under various Encoder and Decoder layer ratio. DDT-B/2(8En4De) achieves much faster convergence speed and better performance." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 65, + 224, + 222, + 342 + ], + "lines": [ + { + "bbox": [ + 65, + 224, + 222, + 342 + ], + "spans": [ + { + "bbox": [ + 65, + 224, + 222, + 342 + ], + "type": "image", + "image_path": "444038e1ba194d39dcc047006d8249c0659c38455fcbbb140ca296cf75f66b58.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 353, + 553, + 375 + ], + "lines": [ + { + "bbox": [ + 55, + 353, + 553, + 375 + ], + "spans": [ + { + "bbox": [ + 55, + 353, + 553, + 375 + ], + "type": "text", + "content": "Figure 8. 
The DDT-L/2 built upon Improved-baselines under various Encoder and Decoder layer ratios. DDT-L/2 prefers an unexpectedly aggressive encoder-decoder ratio; DDT-L/2 (20En4De) achieves much faster convergence and better performance." } ] } ], "index": 7, "angle": 0, "type": "image_caption" } ], "index": 4 },
{ "type": "image", "bbox": [ 228, 225, 385, 342 ], "blocks": [ { "bbox": [ 228, 225, 385, 342 ], "lines": [ { "bbox": [ 228, 225, 385, 342 ], "spans": [ { "bbox": [ 228, 225, 385, 342 ], "type": "image", "image_path": "ead4974e709c35db97c6cbfb7ffa7b61e5d9ea3ee2de853402c30d478dacb855.jpg" } ] } ], "index": 5, "angle": 0, "type": "image_body" } ], "index": 5 },
{ "type": "image", "bbox": [ 391, 228, 548, 342 ], "blocks": [ { "bbox": [ 391, 228, 548, 342 ], "lines": [ { "bbox": [ 391, 228, 548, 342 ], "spans": [ { "bbox": [ 391, 228, 548, 342 ], "type": "image", "image_path": "0107bf7a532ff9c9a3c73a46031f8f1ad26badac4b3d1c14d2d9de1f95c1568a.jpg" } ] } ], "index": 6, "angle": 0, "type": "image_body" } ], "index": 6 },
{ "bbox": [ 55, 382, 295, 537 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 55, 382, 295, 537 ], "spans": [ { "bbox": [ 55, 382, 295, 537 ], "type": "text", "content": "tigation experiments in Fig. 7 and Fig. 8 revealed critical insights into architectural optimization. We observed that a larger encoder is beneficial for further improving the performance as the model size increases. For the Base model in Fig. 7, the optimal configuration emerged as 8 encoder layers and 4 decoder layers, delivering superior performance and convergence speed. Notably, the Large model in Fig. 8 exhibited a distinct preference, achieving peak performance with 20 encoder layers and 4 decoder layers, an unexpectedly aggressive encoder-decoder ratio. This discovery motivates us to scale the layer ratio in DDT-XL/2 to 22 encoder layers and 6 decoder layers to explore the performance upper limits of diffusion transformers." } ] } ], "index": 8 },
{ "bbox": [ 55, 552, 295, 660 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 55, 552, 295, 660 ], "spans": [
{ "bbox": [ 55, 552, 295, 660 ], "type": "text", "content": "Decoder Block types. In our investigation of decoder block types and their impact on high-frequency decoding performance, we systematically evaluated multiple architectural configurations. Our comprehensive assessment included alternative approaches such as simple " },
{ "bbox": [ 55, 552, 295, 660 ], "type": "inline_equation", "content": "3 \times 3" },
{ "bbox": [ 55, 552, 295, 660 ], "type": "text", "content": " convolution blocks and naive MLP blocks. As shown in Tab. 5, the default setting (attention with MLP) achieves the best results. Thanks to the encoder-decoder design, even naive Conv blocks achieve comparable results." } ] } ], "index": 9 },
{ "bbox": [ 55, 671, 128, 683 ], "type": "title", "angle": 0, "lines": [ { "bbox": [ 55, 671, 128, 683 ], "spans": [ { "bbox": [ 55, 671, 128, 683 ], "type": "text", "content": "6. Conclusion" } ] } ], "index": 10 },
{ "bbox": [ 55, 690, 295, 713 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 55, 690, 295, 713 ], "spans": [ { "bbox": [ 55, 690, 295, 713 ], "type": "text", "content": "In this paper, we have introduced a novel Decoupled Diffusion Transformer, which rethinks the optimization dilemma" } ] } ], "index": 11 },
{ "type": "table", "bbox": [ 339, 380, 531, 437 ], "blocks": [ { "bbox": [ 339, 380, 531, 437 ], "lines": [ { "bbox": [ 339, 380, 531, 437 ], "spans": [ { "bbox": [ 339, 380, 531, 437 ], "type": "table", "html": "
<tr><td>DecoderBlock</td><td>FID↓</td><td>sFID↓</td><td>IS↑</td><td>Prec.↑</td><td>Rec.↑</td></tr>
<tr><td>Conv+MLP</td><td>16.96</td><td>7.33</td><td>85.1</td><td>0.62</td><td>0.65</td></tr>
<tr><td>MLP+MLP</td><td>24.13</td><td>7.89</td><td>65.0</td><td>0.57</td><td>0.65</td></tr>
<tr><td>Attn+MLP</td><td>16.32</td><td>6.63</td><td>86.0</td><td>0.62</td><td>0.66</td></tr>
", + "image_path": "d644e55597e5568b650ac9c7f700087e5b4a931d95fdf3b462fa03c854c68e3b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 445, + 555, + 489 + ], + "lines": [ + { + "bbox": [ + 313, + 445, + 555, + 489 + ], + "spans": [ + { + "bbox": [ + 313, + 445, + 555, + 489 + ], + "type": "text", + "content": "Table 5. Metrics of " + }, + { + "bbox": [ + 313, + 445, + 555, + 489 + ], + "type": "inline_equation", + "content": "400K" + }, + { + "bbox": [ + 313, + 445, + 555, + 489 + ], + "type": "text", + "content": " training steps on DDT-B/2(8En4De) with different decoder blocks. All results are reported without classifier-free guidance. The Default Attention + MLP configuration achieves best performance." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 504, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 504, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 504, + 555, + 715 + ], + "type": "text", + "content": "of the traditional diffusion transformer. By decoupling the low-frequency encoding and high-frequency decoding into dedicated components, we effectively resolved the optimization dilemma that has constrained diffusion transformer. Furthermore, we discovered that increasing the encoder capacity relative to the decoder yields increasingly beneficial results as the overall model scale grows. This insight provides valuable guidance for future model scaling efforts. Our experiments demonstrate that our DDT-XL/2 (22En6De) with an unexpected aggressive encoder-decoder layer ratio achieves great performance while requiring only 256 training epochs. This significant improvement in efficiency addresses one of the primary limitations of diffusion models: their lengthy training requirements. The decoupled architecture also presents opportunities for inference optimization through our proposed encoder result sharing mechanism. Our statistical dynamic programming approach for determining optimal sharing strategies enables faster inference while minimizing quality" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 297, + 97 + ], + "type": "text", + "content": "degradation, demonstrating that architectural innovations can yield benefits beyond their primary design objectives." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 116, + 115, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 116, + 115, + 128 + ], + "spans": [ + { + "bbox": [ + 56, + 116, + 115, + 128 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 137, + 295, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 61, + 137, + 295, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 137, + 295, + 190 + ], + "spans": [ + { + "bbox": [ + 61, + 137, + 295, + 190 + ], + "type": "text", + "content": "[1] Niket Agarwal, Arslan Ali, Maciej Bala, Yogesh Balaji, Erik Barker, Tiffany Cai, Prithvijit Chattopadhyay, Yongxin Chen, Yin Cui, Yifan Ding, et al. Cosmos world foundation model platform for physical ai. arXiv preprint arXiv:2501.03575, 2025. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 191, + 295, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 191, + 295, + 246 + ], + "spans": [ + { + "bbox": [ + 62, + 191, + 295, + 246 + ], + "type": "text", + "content": "[2] Fan Bao, Shen Nie, Kaiwen Xue, Yue Cao, Chongxuan Li, Hang Su, and Jun Zhu. All are worth words: A vit backbone for diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22669-22679, 2023. 2, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 247, + 294, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 247, + 294, + 280 + ], + "spans": [ + { + "bbox": [ + 62, + 247, + 294, + 280 + ], + "type": "text", + "content": "[3] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale gan training for high fidelity natural image synthesis. arXiv preprint arXiv:1809.11096, 2018. 1, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 281, + 294, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 281, + 294, + 324 + ], + "spans": [ + { + "bbox": [ + 62, + 281, + 294, + 324 + ], + "type": "text", + "content": "[4] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 325, + 294, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 325, + 294, + 378 + ], + "spans": [ + { + "bbox": [ + 62, + 325, + 294, + 378 + ], + "type": "text", + "content": "[5] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 380, + 294, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 380, + 294, + 434 + ], + "spans": [ + { + "bbox": [ + 62, + 380, + 294, + 434 + ], + "type": "text", + "content": "[6] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. 
Pixart-" },
{ "bbox": [ 62, 380, 294, 434 ], "type": "inline_equation", "content": "\alpha" },
{ "bbox": [ 62, 380, 294, 434 ], "type": "text", "content": ": Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 2" } ] } ], "index": 7 },
{ "bbox": [ 62, 436, 295, 490 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 62, 436, 295, 490 ], "spans": [
{ "bbox": [ 62, 436, 295, 490 ], "type": "text", "content": "[7] Junsong Chen, Chongjian Ge, Enze Xie, Yue Wu, Lewei Yao, Xiaozhe Ren, Zhongdao Wang, Ping Luo, Huchuan Lu, and Zhenguo Li. Pixart-" },
{ "bbox": [ 62, 436, 295, 490 ], "type": "inline_equation", "content": "\Sigma" },
{ "bbox": [ 62, 436, 295, 490 ], "type": "text", "content": ": Weak-to-strong training of diffusion transformer for 4k text-to-image generation. arXiv preprint arXiv:2403.04692, 2024. 2" } ] } ], "index": 8 },
{ "bbox": [ 62, 491, 294, 523 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 62, 491, 294, 523 ], "spans": [ { "bbox": [ 62, 491, 294, 523 ], "type": "text", "content": "[8] Xiangxiang Chu, Jianlin Su, Bo Zhang, and Chunhua Shen. Visionllama: A unified llama interface for vision tasks. arXiv preprint arXiv:2403.00522, 2024. 5" } ] } ], "index": 9 },
{ "bbox": [ 62, 525, 294, 557 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 62, 525, 294, 557 ], "spans": [ { "bbox": [ 62, 525, 294, 557 ], "type": "text", "content": "[9] Chaorui Deng, Deyao Zhu, Kunchang Li, Shi Guan, and Haoqi Fan. Causal diffusion transformers for generative modeling. arXiv preprint arXiv:2412.12095, 2024. 6, 12" } ] } ], "index": 10 },
{ "bbox": [ 57, 558, 294, 590 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 57, 558, 294, 590 ], "spans": [ { "bbox": [ 57, 558, 294, 590 ], "type": "text", "content": "[10] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 2, 6, 7, 11" } ] } ], "index": 11 },
{ "bbox": [ 57, 591, 294, 612 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 57, 591, 294, 612 ], "spans": [ { "bbox": [ 57, 591, 294, 612 ], "type": "text", "content": "[11] Sander Dieleman. Diffusion is spectral autoregression, 2024. 3, 4" } ] } ], "index": 12 },
{ "bbox": [ 57, 614, 295, 669 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 57, 614, 295, 669 ], "spans": [ { "bbox": [ 57, 614, 295, 669 ], "type": "text", "content": "[12] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. arXiv preprint arXiv:2403.03206, 2024. 
2, 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 670, + 295, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 670, + 295, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 670, + 295, + 712 + ], + "type": "text", + "content": "[13] Zhengcong Fei, Mingyuan Fan, Changqian Yu, Debang Li, and Junshi Huang. Diffusion-rwkv: Scaling rwkv-like architectures for diffusion models. arXiv preprint arXiv:2404.04478, 2024. 2" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 712 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 127 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 127 + ], + "type": "text", + "content": "[14] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 23164-23173, 2023. 3, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 129, + 553, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 129, + 553, + 183 + ], + "spans": [ + { + "bbox": [ + 317, + 129, + 553, + 183 + ], + "type": "text", + "content": "[15] Shanghua Gao, Pan Zhou, Ming-Ming Cheng, and Shuicheng Yan. Masked diffusion transformer is a strong image synthesizer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23164-23173, 2023. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 184, + 553, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 184, + 553, + 239 + ], + "spans": [ + { + "bbox": [ + 317, + 184, + 553, + 239 + ], + "type": "text", + "content": "[16] Tiankai Hang, Shuyang Gu, Chen Li, Jianmin Bao, Dong Chen, Han Hu, Xin Geng, and Baining Guo. Efficient diffusion training via min-snr weighting strategy. In Proceedings of the IEEE/CVF international conference on computer vision, pages 7441-7451, 2023. 2, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 239, + 553, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 239, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 317, + 239, + 553, + 293 + ], + "type": "text", + "content": "[17] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 294, + 553, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 294, + 553, + 348 + ], + "spans": [ + { + "bbox": [ + 317, + 294, + 553, + 348 + ], + "type": "text", + "content": "[18] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 350, + 553, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 350, + 553, + 382 + ], + "spans": [ + { + "bbox": [ + 316, + 350, + 553, + 382 + ], + "type": "text", + "content": "[19] Jonathan Ho, Ajay Jain, and Pieter Abbeel. 
Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 383, + 553, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 383, + 553, + 425 + ], + "spans": [ + { + "bbox": [ + 316, + 383, + 553, + 425 + ], + "type": "text", + "content": "[20] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868, 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 426, + 553, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 426, + 553, + 469 + ], + "spans": [ + { + "bbox": [ + 316, + 426, + 553, + 469 + ], + "type": "text", + "content": "[21] Tero Karras, Miika Aittala, Timo Aila, and Samuli Laine. Elucidating the design space of diffusion-based generative models. Advances in Neural Information Processing Systems, 35:26565-26577, 2022. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 471, + 553, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 471, + 553, + 502 + ], + "spans": [ + { + "bbox": [ + 316, + 471, + 553, + 502 + ], + "type": "text", + "content": "[22] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 11" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 504, + 553, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 504, + 553, + 558 + ], + "spans": [ + { + "bbox": [ + 317, + 504, + 553, + 558 + ], + "type": "text", + "content": "[23] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 559, + 553, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 559, + 553, + 612 + ], + "spans": [ + { + "bbox": [ + 317, + 559, + 553, + 612 + ], + "type": "text", + "content": "[24] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 614, + 553, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 614, + 553, + 658 + ], + "spans": [ + { + "bbox": [ + 316, + 614, + 553, + 658 + ], + "type": "text", + "content": "[25] Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in neural information processing systems, 32, 2019. 5" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 659, + 553, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 659, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 317, + 659, + 553, + 712 + ], + "type": "text", + "content": "[26] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 
6" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 297, + 116 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 297, + 116 + ], + "type": "text", + "content": "[27] Tianhong Li, Dina Katabi, and Kaiming He. Return of unconditional generation: A self-supervised representation generation method. Advances in Neural Information Processing Systems, 37:125441-125468, 2024. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 295, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 295, + 162 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 295, + 162 + ], + "type": "text", + "content": "[28] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. Advances in Neural Information Processing Systems, 37:56424-56445, 2025. 2, 4, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 163, + 295, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 163, + 295, + 196 + ], + "spans": [ + { + "bbox": [ + 56, + 163, + 295, + 196 + ], + "type": "text", + "content": "[29] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022. 1, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 198, + 295, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 198, + 295, + 230 + ], + "spans": [ + { + "bbox": [ + 56, + 198, + 295, + 230 + ], + "type": "text", + "content": "[30] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 1, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 232, + 295, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 232, + 295, + 274 + ], + "spans": [ + { + "bbox": [ + 56, + 232, + 295, + 274 + ], + "type": "text", + "content": "[31] Zeyu Lu, Zidong Wang, Di Huang, Chengyue Wu, Xihui Liu, Wanli Ouyang, and Lei Bai. Fit: Flexible vision transformer for diffusion model. arXiv preprint arXiv:2402.12376, 2024.5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 276, + 295, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 276, + 295, + 330 + ], + "spans": [ + { + "bbox": [ + 56, + 276, + 295, + 330 + ], + "type": "text", + "content": "[32] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. arXiv preprint arXiv:2401.08740, 2024. 
2, 3, 5, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 332, + 296, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 332, + 296, + 376 + ], + "spans": [ + { + "bbox": [ + 56, + 332, + 296, + 376 + ], + "type": "text", + "content": "[33] Xinyin Ma, Gongfan Fang, and Xinchao Wang. Deepcache: Accelerating diffusion models for free. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15762-15772, 2024. 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 377, + 295, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 377, + 295, + 410 + ], + "spans": [ + { + "bbox": [ + 56, + 377, + 295, + 410 + ], + "type": "text", + "content": "[34] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter W Battaglia. Generating images with sparse representations. arXiv preprint arXiv:2103.03841, 2021. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 411, + 295, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 411, + 295, + 465 + ], + "spans": [ + { + "bbox": [ + 56, + 411, + 295, + 465 + ], + "type": "text", + "content": "[35] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 467, + 295, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 467, + 295, + 510 + ], + "spans": [ + { + "bbox": [ + 56, + 467, + 295, + 510 + ], + "type": "text", + "content": "[36] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 2, 4, 5, 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 512, + 295, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 512, + 295, + 544 + ], + "spans": [ + { + "bbox": [ + 56, + 512, + 295, + 544 + ], + "type": "text", + "content": "[37] Severi Rissanen, Markus Heinonen, and Arno Solin. Generative modelling with inverse heat dissipation. arXiv preprint arXiv:2206.13397, 2022. 3, 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 546, + 296, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 546, + 296, + 600 + ], + "spans": [ + { + "bbox": [ + 56, + 546, + 296, + 600 + ], + "type": "text", + "content": "[38] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 601, + 295, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 601, + 295, + 644 + ], + "spans": [ + { + "bbox": [ + 56, + 601, + 295, + 644 + ], + "type": "text", + "content": "[39] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 
5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "type": "text", + "content": "[40] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 conference proceedings, pages 1-10, 2022. 1, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 691, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 295, + 713 + ], + "type": "text", + "content": "[41] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 688 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "text", + "content": "generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 96, + 553, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 553, + 139 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 553, + 139 + ], + "type": "text", + "content": "[42] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 141, + 553, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 141, + 553, + 185 + ], + "spans": [ + { + "bbox": [ + 316, + 141, + 553, + 185 + ], + "type": "text", + "content": "[43] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 186, + 553, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 186, + 553, + 229 + ], + "spans": [ + { + "bbox": [ + 316, + 186, + 553, + 229 + ], + "type": "text", + "content": "[44] Jiayan Teng, Wendi Zheng, Ming Ding, Wenyi Hong, Jianqiao Wangni, Zhuoyi Yang, and Jie Tang. Relay diffusion: Unifying diffusion process across resolutions for image synthesis. arXiv preprint arXiv:2309.03350, 2023. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 231, + 553, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 231, + 553, + 274 + ], + "spans": [ + { + "bbox": [ + 316, + 231, + 553, + 274 + ], + "type": "text", + "content": "[45] Yao Teng, Yue Wu, Han Shi, Xuefei Ning, Guohao Dai, Yu Wang, Zhenguo Li, and Xihui Liu. Dim: Diffusion mamba for efficient high-resolution image synthesis. arXiv preprint arXiv:2405.14224, 2024. 
2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 276, + 553, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 276, + 553, + 330 + ], + "spans": [ + { + "bbox": [ + 316, + 276, + 553, + 330 + ], + "type": "text", + "content": "[46] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 331, + 553, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 331, + 553, + 386 + ], + "spans": [ + { + "bbox": [ + 316, + 331, + 553, + 386 + ], + "type": "text", + "content": "[47] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 388, + 553, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 388, + 553, + 430 + ], + "spans": [ + { + "bbox": [ + 316, + 388, + 553, + 430 + ], + "type": "text", + "content": "[48] Shuai Wang, Zexian Li, Tianhui Song, Xubin Li, Tiezheng Ge, Bo Zheng, and Limin Wang. Flowdcn: Exploring dcn-like architectures for fast image generation with arbitrary resolution. arXiv preprint arXiv:2410.22655, 2024. 2, 6, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 432, + 553, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 432, + 553, + 464 + ], + "spans": [ + { + "bbox": [ + 316, + 432, + 553, + 464 + ], + "type": "text", + "content": "[49] Jing Nathan Yan, Jiatao Gu, and Alexander M Rush. Diffusion models without attention. arXiv preprint arXiv:2311.18257, 2023. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 466, + 553, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 466, + 553, + 498 + ], + "spans": [ + { + "bbox": [ + 316, + 466, + 553, + 498 + ], + "type": "text", + "content": "[50] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 5, 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 500, + 553, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 500, + 553, + 532 + ], + "spans": [ + { + "bbox": [ + 316, + 500, + 553, + 532 + ], + "type": "text", + "content": "[51] Qihang Yu, Ju He, Xueqing Deng, Xiaohui Shen, and Liang-Chieh Chen. Randomized autoregressive visual generation. arXiv preprint arXiv:2411.00776, 2024. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 533, + 553, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 533, + 553, + 587 + ], + "spans": [ + { + "bbox": [ + 316, + 533, + 553, + 587 + ], + "type": "text", + "content": "[52] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 
2, 3, 4, 5, 6, 7, 12" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 590, + 553, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 590, + 553, + 633 + ], + "spans": [ + { + "bbox": [ + 316, + 590, + 553, + 633 + ], + "type": "text", + "content": "[53] Xiaoyu Yue, Zidong Wang, Zeyu Lu, Shuyang Sun, Meng Wei, Wanli Ouyang, Lei Bai, and Luping Zhou. Diffusion models need visual priors for image generation. arXiv preprint arXiv:2410.08531, 2024. 2, 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 635, + 553, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 635, + 553, + 688 + ], + "spans": [ + { + "bbox": [ + 316, + 635, + 553, + 688 + ], + "type": "text", + "content": "[54] Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Lirui Zhao, Fu-Yun Wang, Zhanyu Ma, et al. Lumina-last: Making lumina-t2x stronger and faster with next-dit. arXiv preprint arXiv:2406.18583, 2024. 2" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 138, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 138, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 138, + 85 + ], + "type": "text", + "content": "A. Model Specs" + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 82, + 100, + 270, + 161 + ], + "blocks": [ + { + "bbox": [ + 82, + 100, + 270, + 161 + ], + "lines": [ + { + "bbox": [ + 82, + 100, + 270, + 161 + ], + "spans": [ + { + "bbox": [ + 82, + 100, + 270, + 161 + ], + "type": "table", + "html": "
Config#LayersHidden dim#Heads
B/21276812
L/224102416
XL/228115216
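The config table above lists the standard DiT-style size variants (patch size 2). As a minimal sketch of how such specs might be declared in code — the dataclass, field names, and the divisibility check are our own illustration, not from the source — one could write:

```python
# Hypothetical declaration of the model-spec table above (names are ours,
# not the source's): DiT-style size variants with patch size 2.
from dataclasses import dataclass

@dataclass(frozen=True)
class ModelSpec:
    layers: int      # number of transformer blocks
    hidden_dim: int  # token embedding width
    heads: int       # attention heads; head_dim = hidden_dim // heads

SPECS = {
    "B/2":  ModelSpec(layers=12, hidden_dim=768,  heads=12),
    "L/2":  ModelSpec(layers=24, hidden_dim=1024, heads=16),
    "XL/2": ModelSpec(layers=28, hidden_dim=1152, heads=16),
}

for spec in SPECS.values():
    assert spec.hidden_dim % spec.heads == 0  # heads must divide the width
```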
", + "image_path": "ba57dee1c812d45caeb0fdacc9dbbc045f6fbdb63a51e0e248e9d6101fefe26e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 191, + 168, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 191, + 168, + 205 + ], + "spans": [ + { + "bbox": [ + 56, + 191, + 168, + 205 + ], + "type": "text", + "content": "B. Hyper-parameters" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 75, + 220, + 276, + 383 + ], + "blocks": [ + { + "bbox": [ + 75, + 220, + 276, + 383 + ], + "lines": [ + { + "bbox": [ + 75, + 220, + 276, + 383 + ], + "spans": [ + { + "bbox": [ + 75, + 220, + 276, + 383 + ], + "type": "table", + "html": "
VAE\nVAE donwsample\nlatent channelSD-VAE-f8d4-ft-ema\n8\n4
optimizerAdamW [22]
base learning rate1e-4
weight decay0.0
batch size256
learning rate scheduleconstant
augmentationcenter crop
diffusion samplerEuler-ODE
diffusion steps250
evaluation suiteADM [10]
", + "image_path": "72b79304d4617c22a6598c0b47f0a98af50e4c7399f5c40fbd5541f405d454d1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 401, + 205, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 401, + 205, + 414 + ], + "spans": [ + { + "bbox": [ + 55, + 401, + 205, + 414 + ], + "type": "text", + "content": "C. Linear flow and Diffusion" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 421, + 235, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 421, + 235, + 434 + ], + "spans": [ + { + "bbox": [ + 55, + 421, + 235, + 434 + ], + "type": "text", + "content": "Given the SDE forward and reverse process:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 438, + 294, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 438, + 294, + 452 + ], + "spans": [ + { + "bbox": [ + 66, + 438, + 294, + 452 + ], + "type": "interline_equation", + "content": "d \\boldsymbol {x} _ {t} = f (t) \\boldsymbol {x} _ {t} \\mathrm {d} t + g (t) \\mathrm {d} \\boldsymbol {w} \\tag {9}", + "image_path": "fdeeb07fee366407b4c1ec2e6a90f9c92696eae5376595daabb142150d0d25fb.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 66, + 454, + 294, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 454, + 294, + 468 + ], + "spans": [ + { + "bbox": [ + 66, + 454, + 294, + 468 + ], + "type": "interline_equation", + "content": "d \\boldsymbol {x} _ {t} = [ f (t) \\boldsymbol {x} _ {t} - g (t) ^ {2} \\nabla_ {\\boldsymbol {x}} \\log p (\\boldsymbol {x} _ {t}) ] d t + g (t) d \\boldsymbol {w} \\tag {10}", + "image_path": "bee9931fee3a7da5bea1ea0c7e74e9aecc400b894acf32b949526bc5462ef3ee.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 473, + 295, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 473, + 295, + 508 + ], + "spans": [ + { + "bbox": [ + 55, + 473, + 295, + 508 + ], + "type": "text", + "content": "A corresponding deterministic process exists with trajectories sharing the same marginal probability densities of reverse SDE." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 90, + 510, + 295, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 510, + 295, + 533 + ], + "spans": [ + { + "bbox": [ + 90, + 510, + 295, + 533 + ], + "type": "interline_equation", + "content": "d \\boldsymbol {x} _ {t} = [ f (t) \\boldsymbol {x} _ {t} - \\frac {1}{2} g (t) ^ {2} \\nabla_ {\\boldsymbol {x} _ {t}} \\log p (\\boldsymbol {x} _ {t}) ] d t \\tag {11}", + "image_path": "84b1912394fff3cb83fd0749fd0dec605e2b25ed9a230329921b06dc0dbcf2bc.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 537, + 295, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 537, + 295, + 559 + ], + "spans": [ + { + "bbox": [ + 55, + 537, + 295, + 559 + ], + "type": "text", + "content": "Given " + }, + { + "bbox": [ + 55, + 537, + 295, + 559 + ], + "type": "inline_equation", + "content": "x_{t} = \\alpha_{t}x_{data} + \\sigma \\epsilon" + }, + { + "bbox": [ + 55, + 537, + 295, + 559 + ], + "type": "text", + "content": ". 
The traditional diffusion model learns:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 126, + 558, + 294, + 581 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 558, + 294, + 581 + ], + "spans": [ + { + "bbox": [ + 126, + 558, + 294, + 581 + ], + "type": "interline_equation", + "content": "\nabla_ {\boldsymbol {x} _ {t}} \log p (\boldsymbol {x} _ {t}) = - \frac {\epsilon}{\sigma (t)} \tag {12}", + "image_path": "f0e7fe44ac48890dcc46337bba04d7e43598e78e34f85a499709901d7ce871dc.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 582, + 295, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 295, + 605 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 295, + 605 + ], + "type": "text", + "content": "The flow-matching framework actually learns the following:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 145, + 612, + 294, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 612, + 294, + 638 + ], + "spans": [ + { + "bbox": [ + 145, + 612, + 294, + 638 + ], + "type": "interline_equation", + "content": "\begin{array}{l} \boldsymbol {v} _ {t} = \dot {\alpha} x + \dot {\sigma} \epsilon (13) \\ = x - \epsilon (14) \\ \end{array}", + "image_path": "5c964270f3a4817ca55a037ecad8114adfd78a4b41f8f787a697f5c1bd04ae58.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 644, + 294, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 644, + 294, + 667 + ], + "spans": [ + { + "bbox": [ + 55, + 644, + 294, + 667 + ], + "type": "text", + "content": "Here we will demonstrate that in flow-matching, the " + }, + { + "bbox": [ + 55, + 644, + 294, + 667 + ], + "type": "inline_equation", + "content": "\boldsymbol{v}_t" + }, + { + "bbox": [ + 55, + 644, + 294, + 667 + ], + "type": "text", + "content": " prediction is actually the same as the reverse ODE:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 108, + 673, + 294, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 673, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 108, + 673, + 294, + 712 + ], + "type": "interline_equation", + "content": "\begin{array}{l} \dot {\alpha} x + \dot {\sigma} \epsilon (15) \\ = f (t) \boldsymbol {x} _ {t} - \frac {1}{2} g (t) ^ {2} \nabla_ {\boldsymbol {x} _ {t}} \log p (\boldsymbol {x} _ {t}) (16) \\ \end{array}", + "image_path": "7e39be504d140de8a0eb60ad117b831c92f3ff21d51dc4f094b22d491f317bb0.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 326, + 72, + 512, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 72, + 512, + 84 + ], + "spans": [ + { + "bbox": [ + 326, + 72, + 512, + 84 + ], + "type": "text", + "content": "Let us start by expanding the reverse ODE first."
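A minimal numeric sketch (ours, not the paper's code) of the relation just stated in the extracted text: under the linear schedule alpha(t) = t, sigma(t) = 1 - t, the flow-matching target of Eqs. (13)-(14) equals x - eps, and it can be recovered from the score of Eq. (12):

```python
# Sketch (ours): under alpha(t) = t, sigma(t) = 1 - t, the velocity target
# v_t = alpha'(t) * x + sigma'(t) * eps equals x - eps, and substituting
# eps = -sigma * score (from Eq. (12)) gives v = alpha' * x - sigma' * sigma * score.
import numpy as np

rng = np.random.default_rng(0)
x, eps, t = rng.normal(size=4), rng.normal(size=4), 0.3

alpha, sigma = t, 1.0 - t
d_alpha, d_sigma = 1.0, -1.0                    # schedule time derivatives

v_target = d_alpha * x + d_sigma * eps          # Eqs. (13)-(14): x - eps
score = -eps / sigma                            # Eq. (12)
v_from_score = d_alpha * x - d_sigma * sigma * score

assert np.allclose(v_target, x - eps)
assert np.allclose(v_target, v_from_score)
```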
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 334, + 98, + 553, + 176 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 98, + 553, + 176 + ], + "spans": [ + { + "bbox": [ + 334, + 98, + 553, + 176 + ], + "type": "interline_equation", + "content": "\begin{array}{l} f (t) \boldsymbol {x} _ {t} - \frac {1}{2} g (t) ^ {2} \nabla_ {\boldsymbol {x} _ {t}} \log p (\boldsymbol {x} _ {t}) (17) \\ = f (t) (\alpha (t) \boldsymbol {x} _ {\text {d a t a}} + \sigma (t) \epsilon) - \frac {1}{2} g (t) ^ {2} \left[ - \frac {\epsilon}{\sigma (t)} \right] (18) \\ = f (t) \alpha (t) \boldsymbol {x} _ {\text {d a t a}} + (f (t) \sigma (t) + \frac {1}{2} \frac {g (t) ^ {2}}{\sigma (t)}) \epsilon (19) \\ \end{array}", + "image_path": "fa74893b1abc667b3abe7101c9ff507e88e30cd4c438b34f1eb8f39e45365805.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 326, + 192, + 523, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 192, + 523, + 204 + ], + "spans": [ + { + "bbox": [ + 326, + 192, + 523, + 204 + ], + "type": "text", + "content": "To prove Eq. (16), we need to demonstrate that:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 382, + 219, + 553, + 232 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 382, + 219, + 553, + 232 + ], + "spans": [ + { + "bbox": [ + 382, + 219, + 553, + 232 + ], + "type": "interline_equation", + "content": "\dot {\alpha} (t) = f _ {t} \alpha (t) \tag {20}", + "image_path": "389c597cef146629588878e629b7d0d0b5ca0d6398fabe96cf29e4bf25f18c60.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 383, + 235, + 553, + 261 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 235, + 553, + 261 + ], + "spans": [ + { + "bbox": [ + 383, + 235, + 553, + 261 + ], + "type": "interline_equation", + "content": "\dot {\sigma} (t) = f _ {t} \sigma (t) + \frac {1}{2} \frac {g _ {t} ^ {2}}{\sigma (t)}. \tag {21}", + "image_path": "3819a0ec430cb3be1877e0e5d680b83af80f03b87287a8aa211ea73638f31fce.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "spans": [ + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "text", + "content": "Here, let us derive the relation among " + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "inline_equation", + "content": "\alpha(t)" + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "inline_equation", + "content": "\dot{\alpha}(t)" + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "text", + "content": ". 
We donate " + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "inline_equation", + "content": "x_{data}(t) = \\alpha(t)x_{data}" + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "text", + "content": " is the remain component of " + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "inline_equation", + "content": "x_{data}" + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 313, + 278, + 553, + 314 + ], + "type": "text", + "content": ", it is easy to find that:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 384, + 330, + 553, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 330, + 553, + 342 + ], + "spans": [ + { + "bbox": [ + 384, + 330, + 553, + 342 + ], + "type": "interline_equation", + "content": "d \\boldsymbol {x} _ {\\text {d a t a}} (t) = f _ {t} \\boldsymbol {x} _ {\\text {d a t a}} (t) d t \\tag {22}", + "image_path": "b6145f4c8999c8423105577211463cbb37dd58825c7eacd8c3ec4723337ef0d0.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 372, + 344, + 553, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 344, + 553, + 357 + ], + "spans": [ + { + "bbox": [ + 372, + 344, + 553, + 357 + ], + "type": "interline_equation", + "content": "d \\left(\\alpha (t) x _ {d a t a}\\right) = f _ {t} \\alpha (t) x _ {d a t a} d t \\tag {23}", + "image_path": "a0eded9cc9000111a0d55a02d13bbbd9a2717def24bc78e459b1ae81d5d5b834.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 402, + 360, + 553, + 372 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 360, + 553, + 372 + ], + "spans": [ + { + "bbox": [ + 402, + 360, + 553, + 372 + ], + "type": "interline_equation", + "content": "d \\alpha (t) = f _ {t} \\alpha (t) d t \\tag {24}", + "image_path": "5fcda7ffbccffd15d471f3469e4b72ebac13d9d7c36657a87f266d34d09fb509.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 388, + 399, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 388, + 399, + 399 + ], + "spans": [ + { + "bbox": [ + 314, + 388, + 399, + 399 + ], + "type": "text", + "content": "So, Eq. (20) is right." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 402, + 554, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 402, + 554, + 438 + ], + "spans": [ + { + "bbox": [ + 313, + 402, + 554, + 438 + ], + "type": "text", + "content": "Based on the above equation, we will demonstrate the relation of " + }, + { + "bbox": [ + 313, + 402, + 554, + 438 + ], + "type": "inline_equation", + "content": "g_{t}, f_{t}" + }, + { + "bbox": [ + 313, + 402, + 554, + 438 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 313, + 402, + 554, + 438 + ], + "type": "inline_equation", + "content": "\\sigma(t)" + }, + { + "bbox": [ + 313, + 402, + 554, + 438 + ], + "type": "text", + "content": ". Note that Gaussian noise has nice additive properties." 
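A quick numeric check (our own, under an assumed toy drift f(t) = -2t, which is not from the source) that Eq. (20) holds, i.e. that alpha(t) = exp(integral of f from 0 to t) satisfies alpha'(t) = f_t * alpha(t):

```python
# Numeric verification (ours) of Eq. (20): alpha(t) = exp(int_0^t f_s ds)
# satisfies alpha'(t) = f_t * alpha(t), using an assumed toy drift f(t) = -2t.
import numpy as np

f = lambda t: -2.0 * t
ts = np.linspace(0.0, 1.0, 10_001)
# cumulative integral of f via the trapezoid rule
F = np.concatenate(([0.0], np.cumsum((f(ts[1:]) + f(ts[:-1])) / 2 * np.diff(ts))))
alpha = np.exp(F)                               # here alpha(t) = exp(-t**2)

alpha_dot = np.gradient(alpha, ts)              # numerical derivative
assert np.allclose(alpha_dot, f(ts) * alpha, atol=1e-3)
```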
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 373, + 453, + 553, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 453, + 553, + 468 + ], + "spans": [ + { + "bbox": [ + 373, + 453, + 553, + 468 + ], + "type": "interline_equation", + "content": "a \\epsilon_ {1} + b \\epsilon_ {2} \\in \\mathcal {N} \\left(0, \\sqrt {a ^ {2} + b ^ {2}}\\right) \\tag {25}", + "image_path": "2174863fe03b8a6fefacf499b5367771b2602074960c0e4466c227983208a97e.jpg" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "spans": [ + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "text", + "content": "Let us start with the gaussian noise component " + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "inline_equation", + "content": "\\epsilon(t)" + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "text", + "content": " calculation, reaching at " + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "text", + "content": ", every noise addition at " + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "inline_equation", + "content": "s \\in [0, t]" + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "text", + "content": " while been decayed by a factor of " + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "inline_equation", + "content": "\\frac{\\alpha(t)}{\\alpha(s)}" + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "text", + "content": ". Thus, the mixed Gaussian noise will have a std variance " + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "inline_equation", + "content": "\\sigma(t)" + }, + { + "bbox": [ + 313, + 483, + 554, + 534 + ], + "type": "text", + "content": " of:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 367, + 549, + 553, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 549, + 553, + 582 + ], + "spans": [ + { + "bbox": [ + 367, + 549, + 553, + 582 + ], + "type": "interline_equation", + "content": "\\sigma (t) = \\sqrt {\\left(\\int_ {0} ^ {t} \\left[ \\left(\\frac {\\alpha (t)}{\\alpha (s)}\\right) ^ {2} g _ {s} ^ {2} \\right] d s\\right)} \\tag {26}", + "image_path": "b345813acb199a689f123be1245f8188a9c4a1c20170c569d735c61297c4af60.jpg" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 368, + 584, + 553, + 616 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 368, + 584, + 553, + 616 + ], + "spans": [ + { + "bbox": [ + 368, + 584, + 553, + 616 + ], + "type": "interline_equation", + "content": "\\sigma (t) = \\alpha (t) \\sqrt {\\left(\\int_ {0} ^ {t} \\left[ \\left(\\frac {g _ {s}}{\\alpha (s)}\\right) ^ {2} \\right] d s\\right)} \\tag {27}", + "image_path": "e347da8023d83daaeb7d2679ed18e187530d2c4a2361449fe8ee2f86e33f8b17.jpg" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 313, + 629, + 553, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 553, + 654 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 553, + 654 + ], + "type": "text", + "content": "After obtaining the relation of " + }, + { + "bbox": [ + 313, + 629, + 553, + 654 + ], + "type": "inline_equation", + "content": "f_{t}, g_{t}" + }, + { + "bbox": [ + 313, + 629, + 553, + 654 + ], + "type": "text", + "content": " and " + }, + { + 
"bbox": [ + 313, + 629, + 553, + 654 + ], + "type": "inline_equation", + "content": "\\alpha(t), \\sigma(t)" + }, + { + "bbox": [ + 313, + 629, + 553, + 654 + ], + "type": "text", + "content": ", we derive " + }, + { + "bbox": [ + 313, + 629, + 553, + 654 + ], + "type": "inline_equation", + "content": "\\dot{\\alpha}(t)" + }, + { + "bbox": [ + 313, + 629, + 553, + 654 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 629, + 553, + 654 + ], + "type": "inline_equation", + "content": "\\dot{\\sigma}(t)" + }, + { + "bbox": [ + 313, + 629, + 553, + 654 + ], + "type": "text", + "content": " with above conditions:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 384, + 669, + 553, + 696 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 669, + 553, + 696 + ], + "spans": [ + { + "bbox": [ + 384, + 669, + 553, + 696 + ], + "type": "interline_equation", + "content": "\\dot {\\alpha} (t) = f _ {t} \\exp \\left[ \\int_ {0} ^ {t} f _ {s} d s \\right] \\tag {28}", + "image_path": "c75b652e54027176b3f475b3533b97ed68ff31d9667b067ef60336a03c67441e.jpg" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 385, + 697, + 553, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 385, + 697, + 553, + 711 + ], + "spans": [ + { + "bbox": [ + 385, + 697, + 553, + 711 + ], + "type": "interline_equation", + "content": "\\dot {\\alpha} (t) = f _ {t} \\alpha (t) \\tag {29}", + "image_path": "f2c87fb5432980e214a9d9975e78137293848179ab75d640919142227f3d85e8.jpg" + } + ] + } + ], + "index": 33 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 232, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 232, + 84 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 232, + 84 + ], + "type": "text", + "content": "As for " + }, + { + "bbox": [ + 55, + 72, + 232, + 84 + ], + "type": "inline_equation", + "content": "\\dot{\\sigma} (t)" + }, + { + "bbox": [ + 55, + 72, + 232, + 84 + ], + "type": "text", + "content": " , it is quit complex but not hard:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 89, + 302, + 140 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 89, + 302, + 140 + ], + "spans": [ + { + "bbox": [ + 55, + 89, + 302, + 140 + ], + "type": "interline_equation", + "content": "\\dot {\\sigma} (t) = \\dot {\\alpha} (t) \\sqrt {\\left(\\int_ {0} ^ {t} \\left[ \\left(\\frac {g _ {t}}{\\alpha (s)}\\right) ^ {2} \\right] d s\\right)} + \\alpha (t) \\frac {\\frac {1}{2} \\frac {g _ {t} ^ {2}}{\\alpha (t)}}{\\sqrt {\\left(\\int_ {0} ^ {t} \\left[ \\left(\\frac {g _ {t}}{\\alpha (s)}\\right) ^ {2} g _ {s} ^ {2} \\right] d s\\right)}} \\tag {30}", + "image_path": "5f19bedd2962b800c650498be0a19331c6b5d047d3286b065b1fe6c3b814debf.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 144, + 308, + 195 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 144, + 308, + 195 + ], + "spans": [ + { + "bbox": [ + 55, + 144, + 308, + 195 + ], + "type": "interline_equation", + "content": "\\dot {\\sigma} (t) = \\left(f _ {t} \\alpha (t)\\right) 
\\sqrt {\\left(\\int_ {0} ^ {t} \\left[ \\left(\\frac {g _ {t}}{\\alpha (s)}\\right) ^ {2} \\right] d s\\right)} + \\alpha (t) \\frac {\\frac {1}{2} \\frac {g _ {t} ^ {2}}{\\alpha^ {2} (t)}}{\\sqrt {\\left(\\int_ {0} ^ {t} \\left[ \\left(\\frac {g _ {t}}{\\alpha (s)}\\right) ^ {2} \\right] d s\\right)}} \\tag {31}", + "image_path": "ef88d14de92e600bddbd04ca0deb18eae6e257915926630273d672bfa394ac82.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 198, + 300, + 249 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 198, + 300, + 249 + ], + "spans": [ + { + "bbox": [ + 55, + 198, + 300, + 249 + ], + "type": "interline_equation", + "content": "\\dot {\\sigma} (t) = f _ {t} \\alpha (t) \\sqrt {\\left(\\int_ {0} ^ {t} \\left[ \\left(\\frac {g _ {t}}{\\alpha (s)}\\right) ^ {2} \\right] d s\\right)} + \\frac {\\frac {1}{2} g _ {t} ^ {2}}{\\alpha (t) \\sqrt {\\left(\\int_ {0} ^ {t} \\left[ \\left(\\frac {g _ {t}}{\\alpha (s)}\\right) ^ {2} \\right] d s\\right)}} \\tag {32}", + "image_path": "1d28894d6a6c123e5ad9b28eb9137e605f6610da6589dca3780f4f02c6a2550c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 251, + 296, + 277 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 251, + 296, + 277 + ], + "spans": [ + { + "bbox": [ + 55, + 251, + 296, + 277 + ], + "type": "interline_equation", + "content": "\\dot {\\sigma} (t) = f _ {t} \\sigma (t) + \\frac {1}{2} \\frac {g t}{\\sigma (t)} \\tag {33}", + "image_path": "38a84746352f99aa58f5024892b9a887d5561868bc1c864e25f7023b62d30ebf.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 280, + 139, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 280, + 139, + 293 + ], + "spans": [ + { + "bbox": [ + 55, + 280, + 139, + 293 + ], + "type": "text", + "content": "So, Eq. (21) is right." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 301, + 246, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 301, + 246, + 316 + ], + "spans": [ + { + "bbox": [ + 55, + 301, + 246, + 316 + ], + "type": "text", + "content": "D. Proof of Spectrum Autoregressive" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "spans": [ + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "text", + "content": "Given the noise scheduler " + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "inline_equation", + "content": "\\{\\alpha_{t},\\sigma_{t}\\}" + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "text", + "content": ", the clean data " + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{data}}" + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "text", + "content": " and Gaussian noise " + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "text", + "content": ". 
Denote " + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "inline_equation", + "content": "K_{freq}" + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "text", + "content": " as the maximum frequency of the clean data " + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{data}}" + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "text", + "content": ". The noisy latent " + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "text", + "content": " at timestep " + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 321, + 296, + 369 + ], + "type": "text", + "content": " has been defined as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 376, + 294, + 388 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 376, + 294, + 388 + ], + "spans": [ + { + "bbox": [ + 132, + 376, + 294, + 388 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} _ {t} = \\alpha_ {t} \\boldsymbol {x} _ {\\text {d a t a}} + \\sigma_ {t} \\boldsymbol {\\epsilon} \\tag {34}", + "image_path": "0ebc019b251670c81e10a173838c2b88d5c11676822922ca18c1f72bf0779ad2.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 392, + 296, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 392, + 296, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 392, + 296, + 415 + ], + "type": "text", + "content": "The spectrum magnitude " + }, + { + "bbox": [ + 55, + 392, + 296, + 415 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 55, + 392, + 296, + 415 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 55, + 392, + 296, + 415 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 55, + 392, + 296, + 415 + ], + "type": "text", + "content": " on DCT basics " + }, + { + "bbox": [ + 55, + 392, + 296, + 415 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{i}" + }, + { + "bbox": [ + 55, + 392, + 296, + 415 + ], + "type": "text", + "content": " follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 112, + 420, + 181, + 435 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 420, + 181, + 435 + ], + "spans": [ + { + "bbox": [ + 112, + 420, + 181, + 435 + ], + "type": "interline_equation", + "content": "\\boldsymbol {c} _ {i} = \\mathbb {E} _ {\\epsilon} \\left[ \\boldsymbol {u} _ {i} ^ {T} \\boldsymbol {x} _ {t} \\right] ^ {2}", + "image_path": "d24970774cf36353f1a5ade86c1c0aaaad9182aff0186e1a5c8b4c07a041452a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 437, + 238, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 437, + 238, + 452 + ], + "spans": [ + { + "bbox": [ + 113, + 437, + 238, + 452 + ], + "type": "interline_equation", + "content": "\\boldsymbol {c} _ {i} = \\mathbb {E} _ {\\epsilon} [ \\boldsymbol {u} _ {i} ^ {T} (\\alpha_ {t} \\boldsymbol {x} _ {d a t a} + \\sigma_ {t} \\boldsymbol {\\epsilon}) ] ^ {2}", + "image_path": "1ccde59bc1a1cfc30df0759f2c95e650a8bc6504557e78d0263dbb2317d8503f.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 456, + 296, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 456, + 296, + 480 + ], + "spans": [ + { + 
"bbox": [ + 55, + 456, + 296, + 480 + ], + "type": "text", + "content": "Recall that the spectrum magnitude of Gaussian noise " + }, + { + "bbox": [ + 55, + 456, + 296, + 480 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 55, + 456, + 296, + 480 + ], + "type": "text", + "content": " is uniformly distributed." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 495, + 302, + 511 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 495, + 302, + 511 + ], + "spans": [ + { + "bbox": [ + 55, + 495, + 302, + 511 + ], + "type": "interline_equation", + "content": "\\pmb {c} _ {i} = [ \\alpha_ {t} \\pmb {u} _ {i} ^ {T} \\pmb {x} _ {d a t a} ] ^ {2} + 2 \\alpha_ {t} \\sigma_ {t} \\mathbb {E} _ {\\epsilon} [ \\pmb {u} _ {i} ^ {T} \\pmb {x} _ {d a t a} \\pmb {u} _ {i} ^ {T} \\epsilon ] + \\sigma_ {t} ^ {2} \\mathbb {E} _ {\\epsilon} [ \\pmb {u} _ {i} ^ {T} \\epsilon ] ^ {2}", + "image_path": "c3875c50b27ad48d26b25e205cc0b4387ac9b59eb0f16fcf47d31401462f4ec8.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 512, + 197, + 527 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 512, + 197, + 527 + ], + "spans": [ + { + "bbox": [ + 55, + 512, + 197, + 527 + ], + "type": "interline_equation", + "content": "\\boldsymbol {c} _ {i} = \\left[ \\alpha_ {t} \\boldsymbol {u} _ {i} ^ {T} \\boldsymbol {x} _ {\\text {d a t a}} \\right] ^ {2} + \\sigma_ {t} ^ {2} \\mathbb {E} _ {\\boldsymbol {\\epsilon}} \\left[ \\boldsymbol {u} _ {i} ^ {T} \\boldsymbol {\\epsilon} \\right] ^ {2}", + "image_path": "f4ea69d751e0733816159b64e5f6726b899de25b722611fce391bc9874c2deee.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 529, + 167, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 529, + 167, + 544 + ], + "spans": [ + { + "bbox": [ + 56, + 529, + 167, + 544 + ], + "type": "interline_equation", + "content": "\\boldsymbol {c} _ {i} = \\alpha_ {t} ^ {2} \\left[ \\boldsymbol {u} _ {i} ^ {T} \\boldsymbol {x} _ {\\text {d a t a}} \\right] ^ {2} + \\sigma_ {t} ^ {2} \\lambda", + "image_path": "52fcbf99861d85d37be6457dffaacd66f6ad84be517791bad824e0570d123414.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "spans": [ + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "text", + "content": "if " + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "inline_equation", + "content": "\\sigma_t^2\\lambda" + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "text", + "content": " has bigger value than " + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "inline_equation", + "content": "[\\alpha_t\\pmb{u}_i^T\\pmb{x}_{data}]^2" + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "text", + "content": ", the spectrum magnitude " + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "inline_equation", + "content": "\\pmb{c}_i" + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "text", + "content": " on DCT basics " + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "inline_equation", + "content": "\\pmb{u}_i" + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "text", + "content": " will be canceled, thus the maximal remaining frequency " + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "inline_equation", + "content": "f_{max}(t)" + 
}, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "text", + "content": " of original data in " + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "inline_equation", + "content": "\\pmb{x}_t" + }, + { + "bbox": [ + 55, + 548, + 296, + 597 + ], + "type": "text", + "content": " follows:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 77, + 601, + 296, + 634 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 601, + 296, + 634 + ], + "spans": [ + { + "bbox": [ + 77, + 601, + 296, + 634 + ], + "type": "interline_equation", + "content": "f _ {\\max } (t) > \\min \\left(\\left(\\frac {\\alpha_ {t} \\boldsymbol {u} _ {i} ^ {T} \\boldsymbol {x} _ {\\text {d a t a}}}{\\sigma_ {t} \\lambda}\\right) ^ {2}, K _ {\\text {f r e q}}\\right) \\tag {35}", + "image_path": "edbf3374676256bbec9e003ba5f71437d9cca7343568d0db40ce70370c6cbe35.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 55, + 639, + 296, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 639, + 296, + 680 + ], + "spans": [ + { + "bbox": [ + 55, + 639, + 296, + 680 + ], + "type": "text", + "content": "Though " + }, + { + "bbox": [ + 55, + 639, + 296, + 680 + ], + "type": "inline_equation", + "content": "\\frac{\\alpha_t\\pmb{u}_i^T\\pmb{x}_{data}}{\\sigma_t\\lambda}^2" + }, + { + "bbox": [ + 55, + 639, + 296, + 680 + ], + "type": "text", + "content": " depends on the dataset. Here, we directly suppose it as a constant 1. And replace " + }, + { + "bbox": [ + 55, + 639, + 296, + 680 + ], + "type": "inline_equation", + "content": "\\alpha = t" + }, + { + "bbox": [ + 55, + 639, + 296, + 680 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 639, + 296, + 680 + ], + "type": "inline_equation", + "content": "\\sigma = 1 - t" + }, + { + "bbox": [ + 55, + 639, + 296, + 680 + ], + "type": "text", + "content": " in above equation:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 97, + 684, + 296, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 684, + 296, + 717 + ], + "spans": [ + { + "bbox": [ + 97, + 684, + 296, + 717 + ], + "type": "interline_equation", + "content": "f _ {\\max } (t) > \\min \\left(\\left(\\frac {t}{1 - t}\\right) ^ {2}, K _ {f r e q}\\right) \\tag {36}", + "image_path": "ef506982b570c9f78fd58fd3bebb850fa6cacd339dab1077cef17f8ffdc36eff.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 72, + 464, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 464, + 85 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 464, + 85 + ], + "type": "text", + "content": "E. Linear multisteps method" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 91, + 554, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 554, + 139 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 554, + 139 + ], + "type": "text", + "content": "We conduct targeted experiment on SiT-XL/2 with Adams-Bashforth like linear multistep solver; To clarify, we did not employ this powerful solver for our DDT models in all tables across the main paper." 
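Referring back to Eq. (36) just above, a small sketch (ours, with an assumed band limit K_freq = 64 that is not from the source) of the schedule it implies: the surviving data bandwidth grows like (t / (1 - t))**2 until it hits the band limit, so denoising resolves low frequencies first and high frequencies last:

```python
# Sketch (ours) of the lower bound in Eq. (36) on the surviving data frequency.
K_freq = 64.0                                   # assumed band limit of x_data

def f_max_lower_bound(t: float) -> float:
    return min((t / (1.0 - t)) ** 2, K_freq)

for t in (0.1, 0.5, 0.8, 0.9):
    print(f"t={t:.1f}  f_max > {f_max_lower_bound(t):.2f}")
# t=0.1 -> 0.01, t=0.5 -> 1.00, t=0.8 -> 16.00, t=0.9 -> 81 capped at 64
```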
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 139, + 554, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 139, + 554, + 163 + ], + "spans": [ + { + "bbox": [ + 313, + 139, + 554, + 163 + ], + "type": "text", + "content": "The reverse ode of the diffusion models tackles the following integral:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 368, + 171, + 554, + 199 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 368, + 171, + 554, + 199 + ], + "spans": [ + { + "bbox": [ + 368, + 171, + 554, + 199 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} _ {i + 1} = \\boldsymbol {x} _ {i} + \\int_ {t _ {i}} ^ {t _ {i + 1}} \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {t}, t) d t \\tag {37}", + "image_path": "d17765a1e5feab4d07f1bd3f7602575d7a1ed95b04842153a41df62ac97583ba.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 208, + 554, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 208, + 554, + 233 + ], + "spans": [ + { + "bbox": [ + 313, + 208, + 554, + 233 + ], + "type": "text", + "content": "The classic Euler method employs " + }, + { + "bbox": [ + 313, + 208, + 554, + 233 + ], + "type": "inline_equation", + "content": "\\pmb{v}_{\\theta}(\\pmb{x}_i, t_i)" + }, + { + "bbox": [ + 313, + 208, + 554, + 233 + ], + "type": "text", + "content": " as an estimate of " + }, + { + "bbox": [ + 313, + 208, + 554, + 233 + ], + "type": "inline_equation", + "content": "\\pmb{v}_{\\theta}(\\pmb{x}_t, t)" + }, + { + "bbox": [ + 313, + 208, + 554, + 233 + ], + "type": "text", + "content": " throughout the interval " + }, + { + "bbox": [ + 313, + 208, + 554, + 233 + ], + "type": "inline_equation", + "content": "[t_i, t_{i+1}]" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 361, + 242, + 553, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 242, + 553, + 255 + ], + "spans": [ + { + "bbox": [ + 361, + 242, + 553, + 255 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} _ {i + 1} = \\boldsymbol {x} _ {i} + \\left(t _ {i + 1} - t _ {i}\\right) \\boldsymbol {v} _ {\\theta} \\left(\\boldsymbol {x} _ {i}, t _ {i}\\right). \\tag {38}", + "image_path": "635a316bd00ad0a5ed1896afecb07f016c5e1270da8f778e259185289c975a0c.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 264, + 554, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 264, + 554, + 312 + ], + "spans": [ + { + "bbox": [ + 313, + 264, + 554, + 312 + ], + "type": "text", + "content": "The most classic multi-step solver Adams-Bashforth method (deemed as Adams for brevity) incorporates the Lagrange polynomial to improve the estimation accuracy with previous predictions." 
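Before the general Lagrange-polynomial form given right below, here is a concrete two-step instance (our own sketch, not the paper's code) of the Adams-Bashforth idea: with a uniform step h, AB2 uses x_{i+1} = x_i + h * (1.5 * v_i - 0.5 * v_{i-1}).

```python
# Two-step Adams-Bashforth (AB2) sketch (ours), bootstrapped with one Euler step.
import numpy as np

def ab2_solve(v, x0, ts):
    """Integrate dx/dt = v(x, t) over the uniform grid ts with AB2."""
    h = ts[1] - ts[0]
    x = float(x0)
    v_prev = None
    for t in ts[:-1]:
        v_cur = v(x, t)
        if v_prev is None:                      # first step: plain Euler
            x = x + h * v_cur
        else:                                   # AB2 update with the history
            x = x + h * (1.5 * v_cur - 0.5 * v_prev)
        v_prev = v_cur
    return x

# Toy check on dx/dt = -x, x(0) = 1: exact value is exp(-1) at t = 1.
x1 = ab2_solve(lambda x, t: -x, 1.0, np.linspace(0.0, 1.0, 51))
assert abs(x1 - np.exp(-1.0)) < 1e-3
```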
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 321, + 495, + 355 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 321, + 495, + 355 + ], + "spans": [ + { + "bbox": [ + 315, + 321, + 495, + 355 + ], + "type": "interline_equation", + "content": "\\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {t}, t) = \\sum_ {j = 0} ^ {i} (\\prod_ {k = 0, k \\neq j} ^ {i} \\frac {t - t _ {k}}{t _ {j} - t _ {k}}) \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {j}, t _ {j})", + "image_path": "8b034d5198ebc4188d13804b0061ca47f550b932382fdce17a2346d467b3bdfd.jpg" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 332, + 357, + 552, + 392 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 357, + 552, + 392 + ], + "spans": [ + { + "bbox": [ + 332, + 357, + 552, + 392 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} _ {i + 1} \\approx \\boldsymbol {x} _ {i} + \\int_ {t _ {i}} ^ {t _ {i + 1}} \\sum_ {j = 0} ^ {i} (\\prod_ {k = 0, k \\neq j} ^ {i} \\frac {t - t _ {k}}{t _ {j} - t _ {k}}) \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {j}, t _ {j}) d t", + "image_path": "5a24c09eedca9747aacdbdfd328bb09196ff1b1755c47a408ce01368daedce5d.jpg" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 331, + 394, + 554, + 428 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 394, + 554, + 428 + ], + "spans": [ + { + "bbox": [ + 331, + 394, + 554, + 428 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} _ {i + 1} \\approx \\boldsymbol {x} _ {i} + \\sum_ {j = 0} ^ {i} \\boldsymbol {v} _ {\\theta} (\\boldsymbol {x} _ {j}, t _ {j}) \\int_ {t _ {i}} ^ {t _ {i + 1}} \\left(\\prod_ {k = 0, k \\neq j} ^ {i} \\frac {t - t _ {k}}{t _ {j} - t _ {k}}\\right) d t", + "image_path": "ea170857e813c06b559baee57141d84ae7bea959d35cae659af69d404d9a32e5.jpg" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 313, + 437, + 554, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 437, + 554, + 488 + ], + "spans": [ + { + "bbox": [ + 313, + 437, + 554, + 488 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 313, + 437, + 554, + 488 + ], + "type": "inline_equation", + "content": "\\int_{t_i}^{t_{i + 1}}\\left(\\prod_{k = 0,k\\neq j}^i\\frac{t - t_k}{t_j - t_k}\\right)dt" + }, + { + "bbox": [ + 313, + 437, + 554, + 488 + ], + "type": "text", + "content": " of the Lagrange polynomial can be pre-integrated into a constant coefficient, resulting in only naive summation being required for ODE solving." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 314, + 498, + 453, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 498, + 453, + 512 + ], + "spans": [ + { + "bbox": [ + 314, + 498, + 453, + 512 + ], + "type": "text", + "content": "F. Classifier free guidance." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 313, + 518, + 555, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 518, + 555, + 663 + ], + "spans": [ + { + "bbox": [ + 313, + 518, + 555, + 663 + ], + "type": "text", + "content": "As classifier-free guidance significantly impacts the performance of diffusion models. Traditional classifier-free guidance improves performance at the cost of decreased diversity. Interval guidance is recently been adopted by REPA[52] and Causalfusion[9]. It applies classifier-free guidance only to the high-frequency generation phase to preserve the diversity. 
We sweep different classifier-free guidance strength with selected intervals. Our DDT-XL/2 achieves the best performance with interval [0.3, 1] with a classifier-free guidance of 2. Recall that we donate " + }, + { + "bbox": [ + 313, + 518, + 555, + 663 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 313, + 518, + 555, + 663 + ], + "type": "text", + "content": " as the pure noise timestep while REPA[52] use " + }, + { + "bbox": [ + 313, + 518, + 555, + 663 + ], + "type": "inline_equation", + "content": "t = 1" + }, + { + "bbox": [ + 313, + 518, + 555, + 663 + ], + "type": "text", + "content": ", thus this exactly corresponds to the [0, 0.7] interval in REPA[52]" + } + ] + } + ], + "index": 32 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 278, + 291, + 457 + ], + "blocks": [ + { + "bbox": [ + 107, + 269, + 260, + 278 + ], + "lines": [ + { + "bbox": [ + 107, + 269, + 260, + 278 + ], + "spans": [ + { + "bbox": [ + 107, + 269, + 260, + 278 + ], + "type": "text", + "content": "Classifier-free guidance with intervals" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 59, + 278, + 291, + 457 + ], + "lines": [ + { + "bbox": [ + 59, + 278, + 291, + 457 + ], + "spans": [ + { + "bbox": [ + 59, + 278, + 291, + 457 + ], + "type": "image", + "image_path": "d832ac8214ead6291814ec65b36289871ce8a3fe75d47d1a9b200cf3022d9f90.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 469, + 295, + 525 + ], + "lines": [ + { + "bbox": [ + 55, + 469, + 295, + 525 + ], + "spans": [ + { + "bbox": [ + 55, + 469, + 295, + 525 + ], + "type": "text", + "content": "Figure 9. FID10K of DDT-XL/2 with different Classifier free guidance strength and guidance intervals. We sweep different classifier-free guidance strength with selected intervals. Our DDT-XL/2 achieves the best performance with interval [0.3, 1] with a classifier-free guidance of 2." 
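A hedged sketch (ours, not the paper's implementation) of the interval-gated classifier-free guidance described above: with guidance scale w = 2, guidance is applied only for t in [0.3, 1], with t = 0 being pure noise in this convention.

```python
# Interval-gated classifier-free guidance sketch (ours). Outside the chosen
# interval the conditional prediction is used unguided, preserving diversity.
def guided_velocity(v_cond, v_uncond, t, w=2.0, interval=(0.3, 1.0)):
    lo, hi = interval
    if lo <= t <= hi:
        return v_uncond + w * (v_cond - v_uncond)   # standard CFG combination
    return v_cond                                   # no guidance in this phase
```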
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_content_list.json b/data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d6544ae734227ccda360092c12913b5fd586ffcf --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_content_list.json @@ -0,0 +1,1905 @@ +[ + { + "type": "image", + "img_path": "images/9f6b9f3761ad7213c0ce54d33552c028e28aae4e28023f2ef7dc3583c49097e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 164, + 82, + 230, + 127 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SEA-LION: Southeast Asian Languages in One Network", + "text_level": 1, + "bbox": [ + 238, + 95, + 828, + 116 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Raymond Ng\\*, Thanh Ngan Nguyen\\*, Yuli Huang\\*, Ngee Chia Tai\\*, Wai Yi Leong\\*, Wei Qi Leong\\*, Xianbin Yong\\*, Jian Gang Ngui\\*, Yosephine Susanto\\*, Nicholas Cheng\\*, Hamsawardhini Rengarajan\\*, Peerat Limkonchotiwat\\*, Adithya Venkatadri Hulagadri\\*, Kok Wai Teng\\*, Yeo Yeow Tong\\*, Bryan Siow\\*, Wei Yi Teo\\*, Wayne Lau\\*, Choon Meng Tan\\*, Brandon Ong\\*, Zhi Hao Ong\\*, Jann Railey Montalan\\*, Adwin Chan\\*, Sajeban Antonyrex\\*, Ren Lee\\*, Esther Choa\\*, David Ong Tat-Wee\\*, Bing Jie Darius Liu\\*, William Chandra Tjhi\\*, Erik Cambria\\*, Leslie Teo\\* AI Singapore, National University of Singapore \\*Nanyang Technological University https://sea-lion.ai", + "bbox": [ + 129, + 127, + 870, + 293 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 309, + 339, + 323 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, Large Language Models (LLMs) have dominated much of the artificial intelligence scene with their ability to process and generate natural languages. However, the majority of LLM research and development remains English-centric, leaving low-resource languages such as those in the Southeast Asian (SEA) region underrepresented. To address this representation gap, we introduce Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, two cutting-edge multilingual LLMs designed for SEA languages. The SEA-LION family of LLMs supports 11 SEA languages, namely English, Chinese, Indonesian, Vietnamese, Malay, Thai, Burmese, Lao, Filipino, Tamil, and Khmer. Our work leverages large-scale multilingual continued pre-training with a comprehensive post-training regime involving multiple stages of instruction fine-tuning, alignment, and model merging. Evaluation results on multilingual benchmarks show that our models achieve state-of-the-art performance across LLMs supporting SEA languages. 
We open-source the models to benefit the wider SEA community.", + "bbox": [ + 141, + 332, + 460, + 689 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 697, + 258, + 712 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large language models (LLMs) have significantly transformed the field of natural language processing, achieving remarkable performance in text generation, summarization and sentiment analysis (Brown et al., 2020; OpenAI, 2023; Dubey et al., 2024; Rivière et al., 2024; Zhang et al., 2024b; Yeo et al., 2024). Despite their impressive capabilities, most LLMs remain heavily English-centric (Wendler et al., 2024; Zhong et al., 2024). Unfortunately, this situation has led LLMs in regions with many under-represented languages such", + "bbox": [ + 112, + 722, + 489, + 900 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "as Southeast Asia (SEA) to suffer. Languages with lower resources, such as Filipino, Lao, Burmese and Khmer in the SEA region, are not supported by many open-source English-centric LLMs. This underscores the need to bridge the resource and representation gap between English and SEA languages.", + "bbox": [ + 507, + 309, + 884, + 420 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, there have been many attempts to create multilingual LLMs in an open-source manner, e.g., BLOOM (Scao et al., 2022), a project aimed at increasing multilingual presence in opensource LLMs by supporting 46 languages. Popular LLM families such as Llama (Dubey et al., 2024), Gemma (Rivière et al., 2024) and Qwen (Yang et al., 2024a) have also introduced multilingual LLMs for their latest iteration. During our evaluations, we found that the performance of these models is acceptable in the general case, i.e., when considering evaluation benchmarks formulated from English datasets. However, we observe that the performance degrades on SEA-specific benchmarks. Moreover, researchers have also introduced LLMs such as SeaLLMs (Nguyen et al., 2024; Zhang et al., 2024a) and Sailor (Dou et al., 2024) to specifically address the LLM gap in SEA languages. However, the performance of these models is less than ideal for languages such as Thai or Tamil $^{2}$ (10X et al., 2024; AI Products Team, 2024).", + "bbox": [ + 507, + 425, + 884, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we address the issues by proposing a robust open-source Southeast Asian model with data transparency for reproducibility, namely SEA-LION - a family of LLMs continued pretrained (CPT) and fine-tuned on Llama-3.1-8B-Instruct for Llama-SEA-LION-8B-IT and Gemma2-9B for Gemma-SEA-LION-9B-IT with a focus", + "bbox": [ + 507, + 766, + 884, + 878 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1}$ SEA-LION Models Collection", + "bbox": [ + 136, + 906, + 331, + 919 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "2Tamil is one of the official languages in Singapore. It is also spoken in other areas in the SEA region, such as Malaysia.", + "bbox": [ + 507, + 894, + 882, + 921 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05747v4 [cs.CL] 30 Oct 2025", + "bbox": [ + 21, + 290, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "on SEA languages. 
To tackle the performance problem, we utilize 200 billion English, code, and SEA languages tokens as well as 16.8 million English and SEA languages instruction and answer pairs for CPT and post-training steps, respectively, to achieve a significant improvement in SEA languages. In order to allow our models to be used by everyone without restrictions, we release our models under the fully open MIT license. We benchmark our models against the SEA-HELM(Susanto et al., 2025) and Open LLM Leaderboard3 with other LLMs of similar sizes in Southeast Asia like Sailor 2 (Team, 2024) and SeaLLMs 3 (Zhang et al., 2024a), where our models achieve state-of-the-art performances.", + "bbox": [ + 112, + 84, + 489, + 324 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We summarize the contribution of our paper as follows.", + "bbox": [ + 112, + 326, + 487, + 357 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We released two LLMs, Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, that are meticulously trained to accurately represent the unique linguistic diversity of SEA languages.", + "- We also provide in-depth insights in this paper into our end-to-end training workflow to benefit the community developing multilingual LLMs.", + "- We present a reproducible dataset development process, covering sourcing and the model training process. We release our training artifacts, including the training dataset, training scripts, training checkpoints, and fine-tuned models, including Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, to provide strong baselines, promote reproducibility, and enable future research on applications that require SEA-specific knowledge4." + ], + "bbox": [ + 119, + 359, + 489, + 633 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Continued pre-training (CPT)", + "text_level": 1, + "bbox": [ + 112, + 664, + 410, + 682 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Pre-training data", + "text_level": 1, + "bbox": [ + 112, + 692, + 297, + 707 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The CPT data consists of a curated set of English, multilingual, and code corpora from several open source repositories like Dolma (Soldaini et al., 2024), FineWeb (Penedo et al., 2024), the-stackv2 (Lozhkov et al., 2024), SEA-LION-Pile (AI Singapore, 2023), SEA-LION-Pilev2 (AI Singapore, 2025), as well as documents from CommonCrawl (CommonCrawl, 2024) and from the public domain, such as Wikipedia (Foun", + "bbox": [ + 112, + 713, + 489, + 858 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "dation, 2024). For SEA-LION-Pilev2, we filter CommonCrawl WARC data for documents in SEA languages (i.e., Burmese, Simplified Chinese, Indonesian, Khmer, Lao, Malay, Filipino, Tamil, Thai, and Vietnamese) using the pretrained fast-text language classifier (Joulin et al., 2017).", + "bbox": [ + 507, + 84, + 885, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A document is retained if the language code reported in its metadata matches that of one of the aforementioned SEA languages. Additionally, we further clean up the data with Trafilatura (Barbaresi, 2021). To determine the optimal dataset ratio between SEA languages, code, and English for the CPT process, we conduct a series of small-scale CPT experiments, each with a training budget of 10 billion tokens and varying proportions of English, code, and SEA language data. 
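Before the data mix is settled below, the two SEA-LION-Pilev2 filtering steps just described can be sketched in a few lines. This is a minimal illustration rather than the released pipeline: `lid.176.bin` is the public pretrained fastText language-identification model, the set of language codes (with `tl` standing in for Filipino) is an assumed mapping, and `keep_document` is a hypothetical helper.

```python
import fasttext      # pip install fasttext
import trafilatura   # pip install trafilatura

# ISO 639-1 codes for the ten SEA languages listed above; "tl" (Tagalog)
# is used as the closest fastText label for Filipino (an assumption).
SEA_LANGS = {"my", "zh", "id", "km", "lo", "ms", "tl", "ta", "th", "vi"}

lid = fasttext.load_model("lid.176.bin")  # pretrained fastText LID model

def keep_document(html: str, metadata_lang: str):
    """Return cleaned text if the document passes both filters, else None."""
    if metadata_lang not in SEA_LANGS:
        return None
    text = trafilatura.extract(html)  # strip boilerplate from raw WARC HTML
    if not text:
        return None
    labels, _ = lid.predict(text.replace("\n", " "))  # e.g. ('__label__th',)
    predicted = labels[0].removeprefix("__label__")
    # Retain only when the classifier agrees with the reported metadata code.
    return text if predicted == metadata_lang else None
```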
We settled on an optimal data mix ratio of $55\\%$ SEA languages, $25\\%$ English, and $20\\%$ code tokens for a budget of 200 billion tokens. For a detailed breakdown of the token count by languages, please refer to Table 6.", + "bbox": [ + 507, + 192, + 885, + 418 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 CPT process", + "text_level": 1, + "bbox": [ + 507, + 464, + 655, + 482 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Model selection. For the models to CPT from, we choose Llama-3.1-8B-Instruct (Dubey et al., 2024) and Gemma-2-9B (Rivière et al., 2024).", + "bbox": [ + 507, + 508, + 882, + 555 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Training setup. Following previous works (Dou et al., 2024), we use BPE-Dropout (Provilkov et al., 2020) to increase the performance and robustness of the training. We use a Warmup-Stable-Decay (WSD) (Hu et al., 2024) scheduler with warm-up and cooldown phases each representing $10\\%$ of the entire training budget. We use the AdamW (Loshchilov and Hutter, 2019) optimizer with the maximum learning rate (LR) set to $1e^{-5}$ and the final LR after cooldown is $1e^{-7}$ . Following Wortsman et al. (2024), we set epsilon to $1e^{-15}$ . We use Composer (Team, 2021) and LLM Foundry (Team, 2022) for distributed training using Fully Sharded Data Parallel (Zhao et al., 2023) on a cluster of eight nodes of the p5.48xlarge instance from Amazon Web Services (AWS). The total training duration was approximately 6 days and 10 days for the Llama 3.1 and Gemma 2 models, respectively. In this paper, we refer to the post-CPT models as Llama-SEA-LION-8B and Gemma-SEA-LION-9B for the Llama 3.1 and Gemma 2 continued pre-trained models, respectively.", + "bbox": [ + 507, + 567, + 885, + 922 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "3Open LLM Leaderboard", + "bbox": [ + 134, + 869, + 294, + 883 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "4Please visit https://huggingface.co/aisingapore for all artifacts in this paper, including training data and other versions of SEA-LION", + "bbox": [ + 112, + 883, + 487, + 919 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3 Post-training", + "text_level": 1, + "bbox": [ + 114, + 84, + 263, + 99 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Post-training data", + "text_level": 1, + "bbox": [ + 114, + 118, + 302, + 134 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The post-training data consists of 3 subsets of data for Stage 1 IFT, Stage 2 IFT, and the Preference dataset for alignment, respectively. We describe the training data information of each step as follows.", + "bbox": [ + 112, + 147, + 487, + 212 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Stage 1 IFT. In this step, we employ Infinity-Instruct [Foundation and Chat] (Beijing Academy of Artificial Intelligence, 2024) and OpenMath-Instruct 2 (Toshniwal et al., 2024) to improve the mathematical, reasoning, and coding skills of the instruction model. The full details of the training data are shown in Appendix 7.", + "bbox": [ + 112, + 215, + 487, + 344 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Stage 2 IFT. Then, in this step, we use generalized large-scale instructions on the previous instruction model. 
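Stepping back to the CPT optimization recipe above for a moment, the schedule is straightforward to reproduce. A hedged sketch follows, assuming a linear cooldown (the exact decay shape is not specified in the text); `wsd_lambda` and the step budget are illustrative.

```python
import torch
from torch.optim.lr_scheduler import LambdaLR

def wsd_lambda(total_steps, warmup_frac=0.1, cooldown_frac=0.1,
               peak_lr=1e-5, final_lr=1e-7):
    """Warmup-Stable-Decay multiplier: 10% linear warmup, a stable phase
    at the peak LR, then a 10% cooldown from peak_lr down to final_lr."""
    warmup = int(total_steps * warmup_frac)
    stable_end = total_steps - int(total_steps * cooldown_frac)

    def fn(step):
        if step < warmup:                      # linear warmup to peak LR
            return step / max(1, warmup)
        if step < stable_end:                  # long stable phase
            return 1.0
        frac = (step - stable_end) / max(1, total_steps - stable_end)
        return (1.0 - frac) + frac * (final_lr / peak_lr)  # linear cooldown

    return fn

model = torch.nn.Linear(8, 8)  # stand-in for the LLM being trained
# AdamW with the small epsilon (1e-15) noted above, per Wortsman et al.
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5, eps=1e-15)
scheduler = LambdaLR(optimizer, lr_lambda=wsd_lambda(total_steps=10_000))
```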
In particular, we employ 22 existing datasets (written in English, Thai, and Vietnamese) and formulate new 22 synthetic datasets using various models and techniques to create SEA instruction datasets (see Appendix A.3 for the full data generation details). As shown in Appendix 9, we use a total of 7,298,828 instruction samples that cover 11 languages.", + "bbox": [ + 112, + 348, + 487, + 508 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Helpfulness and preference alignment. We also conduct an alignment learning on top of the instruction model using a feedback dataset called UltraFeedback (Cui et al., 2024). In addition, we also synthesized the SEA version of the UltraFeedback using NemoTron-70b with Gemma2 as a reward model, see Appendix A.4 for the full details.", + "bbox": [ + 112, + 513, + 487, + 625 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/9138939f8b29aa04aaea835f7a6d240c1c7202541e014a22e29a7fadfe0d65fb.jpg", + "image_caption": [ + "Figure 1: Training process of Llama-SEA-LION-8B-IT (Section 3.2.1). The post-training process consists of 2 stages of instruction fine-tuning, an alignment stage and multiple merge stages. Dotted lines denote a merge stage and solid lines denote an alignment stage." + ], + "image_footnote": [], + "bbox": [ + 115, + 644, + 480, + 826 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Post-training process", + "text_level": 1, + "bbox": [ + 509, + 84, + 721, + 99 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We use LLaMaFactory (Zheng et al., 2024b) with DeepSpeed (Rasley et al., 2020) for all Instruction Fine Tuning (IFT) and alignment steps. All IFT stages are performed using full model finetuning, where the models are from the previous step (Section 2.2) and existing models. We use MergeKit (Goddard et al., 2024) with a value of 1 for weight and density parameters for all merge steps. Models selected for merging are selected empirically, based on the openness of model licenses, the suitability for merging and performance.", + "bbox": [ + 507, + 105, + 882, + 282 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2.1 Llama-SEA-LION-8B-IT", + "text_level": 1, + "bbox": [ + 507, + 290, + 769, + 305 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Stage 1 IFT As shown in Figure 1, we started off the post-training phase with IFT of Llama-SEA-LION-8B with the Infinity Instruct (Foundation) (Beijing Academy of Artificial Intelligence, 2024) and OpenMathInstruct2 (Toshniwal et al., 2024) datasets. Both datasets contain approximately 9.5 million instruction pairs, primarily in English and centered around reasoning, math, and code. We refer to the model at this stage as Stage1-Llama.", + "bbox": [ + 507, + 309, + 882, + 468 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Stage 2 IFT We performed a second round of IFT using the SEA-Instruct dataset, which consists of approximately 7.3 million instruction pairs, of which 5 million instruction pairs are generated using the Gemma-2-27B-Instruct (Rivière et al., 2024) model and the Qwen2.5-32B-Instruct model (Yang et al., 2024a) in SEA languages. The remaining are English language instruction pairs from the Infinity-Instruct (Chat) (Beijing Academy of Artificial Intelligence, 2024) dataset. 
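The merge stages that follow all run through MergeKit with weight and density set to 1, as noted above. For intuition only, here is a simplified sketch of the DARE TIES mechanics on raw state dicts; this is not MergeKit's implementation, it assumes float parameters, and with density = 1 the DARE drop is a no-op, matching the configuration used here.

```python
import torch

def dare_ties_merge(base, finetuned, density=1.0, weight=1.0):
    """Merge fine-tuned float state dicts into a base via DARE + TIES.

    base / finetuned: a dict[str, Tensor] and a list of such dicts.
    density: fraction of task-vector entries DARE keeps (1.0 = keep all).
    weight: scale applied to each model's task vector.
    """
    merged = {}
    for name, base_w in base.items():
        # Task vectors: each fine-tuned model's offset from the base.
        deltas = [m[name] - base_w for m in finetuned]
        if density < 1.0:
            # DARE: randomly drop entries, rescale survivors by 1/density.
            deltas = [d * (torch.rand_like(d) < density) / density
                      for d in deltas]
        stacked = weight * torch.stack(deltas)
        # TIES-style sign election: keep only entries whose sign agrees
        # with the sign of the summed deltas, then add the survivors.
        elected = torch.sign(stacked.sum(dim=0))
        agree = torch.sign(stacked) == elected
        merged[name] = base_w + (stacked * agree).sum(dim=0)
    return merged

# e.g. merged_sd = dare_ties_merge(cpt_sd, [stage1_sd, stage2_sd])
```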
We refer to the model at this stage as Stage-2-Llama.", + "bbox": [ + 507, + 470, + 882, + 662 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "First merge After finishing the IFT stages, we performed the first of a series of merges by merging Stage-1-Llama and Stage-2-Llama into the Llama-SEA-LION-8B using the DARE TIES (Yu et al., 2024; Ilharco et al., 2023) method. We refer to the model at this stage as Merge-1-Llama.", + "bbox": [ + 507, + 663, + 882, + 760 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Second merge In order to mitigate catastrophic forgetting due to the fine-tuning process (Alexandrov et al., 2024), we performed the second round of merging by merging top-performing instruction-tuned models that share the Llama 3.1 lineage. We merge the original Llama-3.1-8B-Instruct, Llama3-8B-SEA-LION-v2.1-Instruct (SEA-LION Team, 2024), and SuperNova-Lite (Arcee-AI, 2024) into Merge-1-Llama using the Consensus TA (Wang et al., 2024b; Ilharco et al., 2023) merge method.", + "bbox": [ + 507, + 760, + 882, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We refer to the model at this stage as Merge-2-Llama.", + "bbox": [ + 112, + 84, + 489, + 115 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Helpfulness and preference alignment We performed one round of alignment on Merge-2-Llama using SimPO (Meng et al., 2024) with the SEA-Preference dataset. We refer to the model at this stage as Aligned-SimPO-Llama.", + "bbox": [ + 112, + 117, + 489, + 198 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Final merge Lastly, we perform a merge using the DELLA-Linear merge. With the original Llama3.1-8B-Instruct model as the base for merging, we merge in Merge-2-Llama and Aligned-SIMPO-Llama to produce the final model, Llama-SEA-LION-v3-9B-IT.", + "bbox": [ + 112, + 199, + 490, + 294 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Gemma-SEA-LION-9B-IT", + "text_level": 1, + "bbox": [ + 112, + 325, + 384, + 338 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4c339bc0c829686d5690eeb36036737c9fc9fc6bbb2f0e5a2fc19dd66e119f36.jpg", + "image_caption": [ + "Figure 2: Training process of Gemma-SEA-LION-9B-IT (Section 3.2.2). The post-training process comprises two stages of instruction fine-tuning, an alignment stage, and multiple merge stages. Dotted lines denote a merge stage and solid lines denote an alignment stage." + ], + "image_footnote": [], + "bbox": [ + 117, + 355, + 485, + 538 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Stage 1 and Stage 2 IFT Similar to the Llama-SEA-LION-8B-IT, we started off the post-training phase with both stages of IFT using the same datasets on the Gemma-2-9B model (Rivière et al., 2024). We refer to both models at stage 1 and stage 2 as Stage-1-Gemma and Stage-2-Gemma, respectively.", + "bbox": [ + 112, + 631, + 489, + 727 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "First merge We merge the Gemma-2-9B-IT (Rivière et al., 2024) and Stage-2-Gemma into Gemma-2-9B using the DELLA Linear method. We refer to the model at this stage as the Merge-1-Gemma.", + "bbox": [ + 112, + 728, + 489, + 791 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Helpfulness and preference alignment Using the Merge-1-Gemma as the base model, we performed one round of alignment using SimPO with the SEA-Preference dataset. 
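The SimPO rounds just described optimize a reference-free, length-normalized preference margin. A minimal sketch of the objective from Meng et al. (2024) follows; the tensor names and the `beta`/`gamma` defaults are illustrative, not the values used for SEA-LION.

```python
import torch
import torch.nn.functional as F

def simpo_loss(chosen_logps, rejected_logps, chosen_lens, rejected_lens,
               beta=2.0, gamma=1.0):
    """SimPO: chosen_logps / rejected_logps are summed token log-probs of
    the chosen and rejected responses under the policy being trained."""
    # Length-normalized implicit rewards; no reference model is needed.
    r_chosen = beta * chosen_logps / chosen_lens
    r_rejected = beta * rejected_logps / rejected_lens
    # Bradley-Terry style margin loss with a target reward margin gamma.
    return -F.logsigmoid(r_chosen - r_rejected - gamma).mean()
```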
We refer to the model at this stage as the Aligned-SimPO-Gemma.", + "bbox": [ + 112, + 793, + 489, + 872 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Final merge Finally, using the Gemma-2-9B model as the base model, we merged Merge-1-Gemma, FuseChat Gemma-2-9B-Instruct (Yang", + "bbox": [ + 112, + 873, + 489, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "et al., 2024b), Gemma-SEA-LION-9B, and Aligned-SimPO-Gemma into it to produce the final model Gemma-SEA-LION-9B-IT.", + "bbox": [ + 507, + 84, + 884, + 131 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Discussion", + "text_level": 1, + "bbox": [ + 507, + 146, + 638, + 160 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This post-training workflow emphasizes the careful balance between general capabilities, SEA-specific linguistic fluency, and natural conversational abilities. Each step in the workflow is designed to progressively refine the model, ensuring it meets the diverse needs of users in the Southeast Asian region.", + "bbox": [ + 507, + 168, + 884, + 281 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The entire post-training process for Gemma-SEA-LION-9B-IT and Llama-SEA-LION-8B-IT took approximately 1350 and 1024 GPU hours, respectively, on eight H100 GPUs. To make the training efficient, all post-training steps utilize Liger Kernel (Hsu et al., 2024) for substantial memory savings of approximately $60\\%$ .", + "bbox": [ + 507, + 282, + 884, + 395 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 Experimental Setup", + "text_level": 1, + "bbox": [ + 507, + 409, + 717, + 426 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1 Competitive methods", + "text_level": 1, + "bbox": [ + 507, + 437, + 722, + 453 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the evaluation, we compared our models against well-known LLMs for multilingual and SEA languages, such as SeALMsv3 (Zhang et al., 2024a), Sailorv2 (Team, 2024), Qwen 2.5 (Yang et al., 2024a), Gemma 2 (Riviere et al., 2024) and Llama 3.1 (Dubey et al., 2024), where the parameters of those models are less than 10 billion parameters, similar to our models.", + "bbox": [ + 507, + 458, + 884, + 588 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2 Evaluation Benchmarks", + "text_level": 1, + "bbox": [ + 507, + 602, + 744, + 615 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To evaluate the robustness of our proposed models, we compare our models to competitors in three benchmarks.", + "bbox": [ + 507, + 625, + 882, + 671 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "SEA Benchmarks. We evaluated the multilingual performance of each LLM using the SEA-HELM Leaderboard (Leong et al., 2023; Susanto et al., 2025) $^{5}$ . We selected SEA-HELM because the design choice of this benchmark reflects the performance of SEA culture and knowledge the most compared with other existing benchmarks (DAMO-NLP-SG, 2024; Lovenia et al., 2024; Wang et al., 2024a). We also evaluate on a wide-range SEA coverage language benchmark called SEACrowd (Lovenia et al., 2024). 
This benchmark covers all SEA languages with both natural language understanding and generation datasets.", + "bbox": [ + 507, + 674, + 884, + 883 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "5Please visit https://leaderboard.sea-lion.ai/ for live score updates of SEA-LION.", + "bbox": [ + 507, + 894, + 882, + 921 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/e406902fdae58a202c5ead85c91057043dad6359c378e31f2bf590aaf4e7548a.jpg", + "table_caption": [ + "SEA-HELM" + ], + "table_footnote": [], + "table_body": "
<table><tr><td></td><td></td><td colspan="4">NLU, NLG, NLR, NLI</td><td colspan="3">Instruction Following</td></tr>
<tr><td>Models</td><td>Average</td><td>ID</td><td>VI</td><td>TH</td><td>TA</td><td>ID</td><td>VI</td><td>TH</td></tr>
<tr><td>Meta-Llama-3.1-8B</td><td>35.37</td><td>42.33</td><td>40.67</td><td>35.13</td><td>38.88</td><td>16.19</td><td>19.05</td><td>9.00</td></tr>
<tr><td>SeaLLMs-v3-7B</td><td>37.04</td><td>44.79</td><td>48.29</td><td>43.53</td><td>27.45</td><td>26.67</td><td>35.24</td><td>26.00</td></tr>
<tr><td>Gemma-2-9B</td><td>41.48</td><td>47.65</td><td>43.28</td><td>42.00</td><td>53.26</td><td>4.76</td><td>3.81</td><td>10.00</td></tr>
<tr><td>Qwen2.5-7B</td><td>41.98</td><td>51.63</td><td>52.17</td><td>46.55</td><td>36.60</td><td>31.43</td><td>36.19</td><td>30.00</td></tr>
<tr><td>Sailor2-8B</td><td>42.62</td><td>53.23</td><td>47.33</td><td>46.64</td><td>45.04</td><td>30.48</td><td>30.48</td><td>35.00</td></tr>
<tr><td>Llama-SEA-LION-8B</td><td>41.42</td><td>44.98</td><td>46.25</td><td>42.79</td><td>43.03</td><td>25.71</td><td>32.38</td><td>23.00</td></tr>
<tr><td>Gemma-SEA-LION-9B</td><td>48.67</td><td>57.16</td><td>49.39</td><td>47.16</td><td>60.56</td><td>25.71</td><td>20.00</td><td>27.00</td></tr></table>
", + "bbox": [ + 119, + 99, + 877, + 258 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/4867daa85c3c9dd6e65602d241de43dd14c0427a828b2219e89a5413008cb6c3.jpg", + "table_caption": [ + "Table 1: SEA-HELM multilingual benchmark on NLU, NLG, NLR, NLI and instruction following on base and continued pre-trained models of similar sizes.", + "Open LLM Leaderboard" + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Models</td><td>Average</td><td>MMLU-PRO</td><td>BBH</td><td>GPQA</td><td>MATH Lvl 5</td><td>IFEval (EN)</td><td>MUSR</td></tr>
<tr><td>Meta-Llama-3.1-8B</td><td>13.9</td><td>24.95</td><td>25.29</td><td>6.32</td><td>5.14</td><td>12.7</td><td>8.98</td></tr>
<tr><td>Sailor2-8B</td><td>17.71</td><td>25.74</td><td>27.62</td><td>4.87</td><td>7.02</td><td>21.95</td><td>19.03</td></tr>
<tr><td>Gemma-2-9B</td><td>21.15</td><td>34.48</td><td>34.1</td><td>10.51</td><td>13.14</td><td>20.4</td><td>14.3</td></tr>
<tr><td>SeaLLMs-v3-7B</td><td>24.00</td><td>35.71</td><td>34.57</td><td>9.28</td><td>18.81</td><td>32.94</td><td>12.68</td></tr>
<tr><td>Qwen2.5-7B</td><td>24.99</td><td>37.39</td><td>35.81</td><td>9.96</td><td>18.88</td><td>33.74</td><td>14.14</td></tr>
<tr><td>Llama-SEA-LION-8B</td><td>16.61</td><td>27.6</td><td>26.04</td><td>7.49</td><td>9.89</td><td>16.56</td><td>12.07</td></tr>
<tr><td>Gemma-SEA-LION-9B</td><td>22.41</td><td>32.78</td><td>37.24</td><td>10.29</td><td>9.89</td><td>30.12</td><td>14.11</td></tr></table>
", + "bbox": [ + 119, + 323, + 877, + 442 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2: Open LLM Leaderboard benchmarks across different continued pre-trained models of similar sizes.", + "bbox": [ + 129, + 451, + 862, + 467 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "However, due to maintenance reasons, we cannot reproduce the NLG benchmark of SEACrowd. Therefore, we experiment only with the NLU benchmark (zero-shot), which has 131 data subsets, 7 tasks, and 31 SEA indigenous languages.", + "bbox": [ + 112, + 493, + 489, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "English performance. We also evaluated the English performance of the models using the Open LLM Leaderboard (HuggingFace, 2024). This is because English is also widely used in SEA countries. Therefore, we need to evaluate the understanding and knowledge of LLMs in the English benchmark as well. The leaderboard consists of six benchmarks, IFEval (Zhou et al., 2023), Big Bench Hard (Suzgun et al., 2023), MATH (Hendrycks et al., 2021), GPQA (Rein et al., 2023), MuSR (Sprague et al., 2024) and MMLUPRO (Wang et al., 2024c). Moreover, we also evaluate the CPT models on SEA-HELM and the Open LLM Leaderboard since these benchmarks support the CPT evaluation.", + "bbox": [ + 112, + 573, + 489, + 815 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5 Experimental Results", + "text_level": 1, + "bbox": [ + 112, + 829, + 334, + 845 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To understand the robustness and generalization of our proposed models, we conduct three studies as follows. Section 5.1 evaluates the robustness of continual pre-training models using SEA-HELM", + "bbox": [ + 112, + 856, + 489, + 921 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and the Open LLM leaderboard. In Section 5.2, we compare our instruction fine-tuning models with competitors in three benchmarks to demonstrate the generalization of our models. Lastly, we discuss the design choice of our models in Section 5.3.", + "bbox": [ + 507, + 492, + 882, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1 Continued Pre-Training Results", + "text_level": 1, + "bbox": [ + 507, + 590, + 806, + 606 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "SEA performance. The CPT stage is primarily focused on gaining SEA language capabilities and knowledge. For the purpose of comparison against base and CPT models, as shown in Table 1, we observed a 6.05 and 7.19 average SEA-HELM performance increase over the Meta-Llama-3.1-8B and Gemma-2-9B for Llama-SEA-LION-8B and Gemma-SEA-LION-9B, respectively. We observed a much larger average increase with instruction following capabilities in particular, which we attribute to the fact that our CPT models are trained from the instruction models rather than from the base models. Moreover, in the average performance, we found that our Gemma-SEA-LION-9B models perform the best compared to other models. This emphasizes a strong reason to perform CPT for improving the performance of SEA languages, rather than skipping the CPT and performing SFT directly.", + "bbox": [ + 505, + 615, + 884, + 921 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/0f75f24e6df40fe78b1dd64776ad1d19b8df275d0fba107911bc92e1b8175379.jpg", + "table_caption": [ + "SEA-HELM" + ], + "table_footnote": [], + "table_body": "
<table><tr><td></td><td></td><td colspan="4">NLU, NLG, NLR, NLI</td><td colspan="3">Instruction Following</td><td colspan="3">MTBench</td></tr>
<tr><td>Models</td><td>Average</td><td>ID</td><td>VI</td><td>TH</td><td>TA</td><td>ID</td><td>VI</td><td>TH</td><td>ID</td><td>VI</td><td>TH</td></tr>
<tr><td>SeaLLMs-v3-7B-Chat</td><td>39.19</td><td>42.72</td><td>48.50</td><td>42.59</td><td>12.06</td><td>57.14</td><td>53.33</td><td>47.00</td><td>59.81</td><td>65.24</td><td>56.59</td></tr>
<tr><td>Llama-3.1-8B-Instruct</td><td>41.48</td><td>51.50</td><td>51.31</td><td>45.32</td><td>15.40</td><td>77.14</td><td>75.24</td><td>63.00</td><td>56.38</td><td>57.59</td><td>54.34</td></tr>
<tr><td>Sailor2-8B-Chat</td><td>43.13</td><td>48.98</td><td>48.01</td><td>45.44</td><td>28.29</td><td>49.52</td><td>45.71</td><td>40.00</td><td>69.76</td><td>66.97</td><td>73.94</td></tr>
<tr><td>Qwen2.5-7B-Instruct</td><td>44.58</td><td>60.28</td><td>53.46</td><td>53.43</td><td>21.03</td><td>81.90</td><td>69.52</td><td>66.00</td><td>65.66</td><td>66.80</td><td>68.71</td></tr>
<tr><td>Gemma-2-9B-IT</td><td>55.33</td><td>64.04</td><td>59.86</td><td>57.22</td><td>52.28</td><td>88.57</td><td>78.10</td><td>71.00</td><td>68.78</td><td>68.37</td><td>73.51</td></tr>
<tr><td>Stage-1-Llama</td><td>50.76</td><td>51.84</td><td>51.83</td><td>46.23</td><td>27.53</td><td>69.52</td><td>73.33</td><td>59.00</td><td>42.74</td><td>46.41</td><td>46.46</td></tr>
<tr><td>Stage-2-Llama</td><td>59.49</td><td>53.87</td><td>55.18</td><td>50.92</td><td>44.80</td><td>77.14</td><td>76.19</td><td>67.00</td><td>50.90</td><td>53.72</td><td>46.97</td></tr>
<tr><td>Merge-1-Llama</td><td>59.36</td><td>56.73</td><td>56.82</td><td>51.71</td><td>46.63</td><td>81.90</td><td>82.86</td><td>67.00</td><td>57.04</td><td>54.01</td><td>50.28</td></tr>
<tr><td>Merge-2-Llama</td><td>58.01</td><td>59.19</td><td>52.63</td><td>51.89</td><td>35.40</td><td>87.62</td><td>80.95</td><td>78.00</td><td>56.38</td><td>59.32</td><td>58.86</td></tr>
<tr><td>Aligned-SimPO-Llama</td><td>51.30</td><td>54.86</td><td>51.69</td><td>46.77</td><td>26.40</td><td>82.86</td><td>80.00</td><td>68.00</td><td>68.20</td><td>64.68</td><td>64.92</td></tr>
<tr><td>Llama-SEA-LION-8B-IT</td><td>61.84</td><td>60.50</td><td>61.48</td><td>55.92</td><td>43.61</td><td>84.76</td><td>85.71</td><td>76.00</td><td>62.65</td><td>68.32</td><td>65.13</td></tr>
<tr><td>Stage-1-Gemma</td><td>56.56</td><td>55.06</td><td>54.51</td><td>51.96</td><td>42.74</td><td>66.67</td><td>74.29</td><td>61.00</td><td>47.35</td><td>47.26</td><td>55.05</td></tr>
<tr><td>Stage-2-Gemma</td><td>66.66</td><td>64.10</td><td>61.76</td><td>56.90</td><td>57.85</td><td>89.52</td><td>82.86</td><td>76.00</td><td>60.54</td><td>58.93</td><td>58.76</td></tr>
<tr><td>Merge-1-Gemma</td><td>69.26</td><td>66.25</td><td>64.95</td><td>59.74</td><td>60.41</td><td>89.52</td><td>91.43</td><td>82.00</td><td>66.45</td><td>64.47</td><td>65.00</td></tr>
<tr><td>Aligned-SimPO-Gemma</td><td>69.37</td><td>65.69</td><td>65.47</td><td>59.51</td><td>57.38</td><td>86.67</td><td>88.57</td><td>78.00</td><td>68.89</td><td>73.67</td><td>73.51</td></tr>
<tr><td>Gemma-SEA-LION-9B-IT</td><td>69.35</td><td>66.26</td><td>64.93</td><td>59.23</td><td>58.82</td><td>94.29</td><td>88.57</td><td>78.00</td><td>65.85</td><td>73.27</td><td>69.07</td></tr></table>
", + "bbox": [ + 119, + 95, + 878, + 337 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/95807f122e753274efc4c43da49e9d40d32d3ff051642f5e33ffc65fc2dd8d5a.jpg", + "image_caption": [ + "Figure 3: Zero-shot model performance across NLU tasks in SEA languages." + ], + "image_footnote": [], + "bbox": [ + 121, + 391, + 687, + 541 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/4e16748c2d5d2a0090d54b396c70b170858f6680defc5b52a25cfe87b65aacd2.jpg", + "table_caption": [ + "Table 3: SEA-HELM multilingual benchmark on NLU, NLG, NLR, NLI, instruction following and multi-turn chat on instruct models of similar sizes." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Model</td><td>NLU Score</td></tr>
<tr><td>SeaLLMs-v3-7B-chat</td><td>52.68</td></tr>
<tr><td>Llama-3.1-8B-Instruct</td><td>49.94</td></tr>
<tr><td>Sailor2-8B-Chat</td><td>60.21</td></tr>
<tr><td>Qwen2.5-7B-Instruct</td><td>54.51</td></tr>
<tr><td>Gemma-2-9B-IT</td><td>60.21</td></tr>
<tr><td>Llama-SEA-LION-8B-IT</td><td>55.10</td></tr>
<tr><td>Gemma-SEA-LION-9B-IT</td><td>64.13</td></tr></table>
", + "bbox": [ + 702, + 414, + 878, + 510 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4: The average NLU performance across 131 data subsets and 31 indigenous languages.", + "bbox": [ + 700, + 519, + 882, + 576 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "English performance. For the English performance, as shown in Table 2, both CPT models also managed to perform competitively against the Meta-Llama-3.1-8B and Gemma-2-9B base models on the Open LLM Leaderboard benchmarks. This indicates that our choice of retraining with a proportion of $25\\%$ English tokens has been beneficial in mitigating catastrophic forgetting, which has been shown to stem from CPT (Zheng et al., 2024a). Although our CPT models perform lower than Qwen and SeaLLMs on this benchmark, we outperform them on the SEA language instead, which is the main focus of this work.", + "bbox": [ + 112, + 595, + 489, + 802 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2 Instruction Fine-tuning Results", + "text_level": 1, + "bbox": [ + 112, + 816, + 406, + 832 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this study, we compare our models with competitors on SEA-HELM, SEACrowd, and the Open LLM Leaderboard as follows.", + "bbox": [ + 112, + 840, + 487, + 885 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SEA-HELM. As shown in Table 3, the SEA-HELM benchmark performance demonstrates that", + "bbox": [ + 112, + 889, + 489, + 921 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "our instruct models, Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, attain competitive performance in SEA languages, with Gemma-SEA-LION-9B-IT achieving one of the highest average performances. Moreover, we significantly improve the performance of Llama-3.1-8B-Instruct from 41.48 to 61.84 using Llama-SEA-LION-8B-IT, while Gemma-SEA-LION-9B-IT achieves 14.02 improvement points compared to Gemma-2-9B-IT. Both Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT outperform other SEA languages-focused LLMs, such as *Sailor2-8B-Chat* and *SEALLMs-v3-7B-Chat*, with an average score of 69.35 across all the languages covered by the SEAHELM benchmark, apart from the SEA-MTbench tasks. This conforms with the previous results on the CPT models (Section 5.1) that our CPT model performs the best on SEA languages, resulting in the best performer in this experiment.", + "bbox": [ + 507, + 594, + 884, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SEACrowd. Other than evaluating on some SEA", + "bbox": [ + 507, + 904, + 882, + 921 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/78429de77740e6d1e87d54a49d5ac0a30a1623513b766b33f3be80741d5f7606.jpg", + "table_caption": [ + "Open LLM Leaderboard" + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Models</td><td>Average</td><td>MMLU-PRO</td><td>BBH</td><td>GPQA</td><td>MATH Lvl 5</td><td>IFEval (EN)</td><td>MUSR</td></tr>
<tr><td>Sailor2-8B-Chat</td><td>16.37</td><td>27.93</td><td>27.15</td><td>3.47</td><td>0.00</td><td>37.49</td><td>2.19</td></tr>
<tr><td>SeaLLMs-v3-7B-Chat</td><td>22.49</td><td>33.93</td><td>24.37</td><td>7.27</td><td>15.86</td><td>44.10</td><td>9.38</td></tr>
<tr><td>Llama-3.1-8B-Instruct</td><td>27.88</td><td>29.36</td><td>26.10</td><td>10.63</td><td>17.45</td><td>77.03</td><td>6.75</td></tr>
<tr><td>Qwen2.5-7B-Instruct</td><td>27.93</td><td>37.00</td><td>34.72</td><td>10.18</td><td>0.00</td><td>76.34</td><td>9.34</td></tr>
<tr><td>Gemma-2-9B-IT</td><td>28.86</td><td>31.95</td><td>42.14</td><td>14.77</td><td>0.23</td><td>74.36</td><td>9.74</td></tr>
<tr><td>Stage-1-Llama</td><td>24.51</td><td>25.87</td><td>26.32</td><td>7.83</td><td>19.26</td><td>62.89</td><td>4.88</td></tr>
<tr><td>Stage-2-Llama</td><td>27.75</td><td>28.10</td><td>24.64</td><td>7.72</td><td>19.56</td><td>78.78</td><td>7.74</td></tr>
<tr><td>Merge-1-Llama</td><td>27.49</td><td>27.47</td><td>26.22</td><td>8.28</td><td>19.79</td><td>76.16</td><td>7.04</td></tr>
<tr><td>Merge-2-Llama</td><td>29.96</td><td>29.92</td><td>28.78</td><td>9.96</td><td>19.94</td><td>82.61</td><td>8.54</td></tr>
<tr><td>Aligned-SimPO-Llama</td><td>30.58</td><td>30.84</td><td>34.31</td><td>8.39</td><td>26.59</td><td>75.76</td><td>7.61</td></tr>
<tr><td>Llama-SEA-LION-8B-IT</td><td>30.39</td><td>31.01</td><td>29.47</td><td>10.40</td><td>22.58</td><td>80.35</td><td>8.54</td></tr>
<tr><td>Stage-1-Gemma</td><td>29.88</td><td>33.34</td><td>38.51</td><td>10.74</td><td>24.17</td><td>56.87</td><td>15.66</td></tr>
<tr><td>Stage-2-Gemma</td><td>33.48</td><td>34.67</td><td>36.06</td><td>11.74</td><td>20.77</td><td>83.00</td><td>14.61</td></tr>
<tr><td>Merge-1-Gemma</td><td>35.15</td><td>36.22</td><td>41.42</td><td>15.32</td><td>26.28</td><td>82.09</td><td>9.59</td></tr>
<tr><td>Aligned-SimPO-Gemma</td><td>35.31</td><td>37.65</td><td>42.38</td><td>14.99</td><td>27.79</td><td>80.23</td><td>8.82</td></tr>
<tr><td>Gemma-SEA-LION-9B-IT</td><td>35.43</td><td>36.94</td><td>43.39</td><td>15.10</td><td>24.24</td><td>81.85</td><td>11.07</td></tr></table>
", + "bbox": [ + 119, + 95, + 877, + 337 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5: Open LLM Leaderboard benchmarks across different instruct models of similar sizes.", + "bbox": [ + 179, + 347, + 815, + 361 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "languages like SEA-HELM, we also evaluated our model compared to competitors on 31 SEA indigenous languages using SEACrowd-NLU. Note that, for this study, we use only the best settings of our models from the previous experiment (Table 3). As shown in Table 4, we observe a state-of-the-art result from Gemma-SEA-LION-9B-IT by achieving 64.13 points on the NLU benchmark, while Llama-SEA-LION-8B-IT improves its baseline from 49.94 to 55.10 points. Moreover, the results from Figure 3 also emphasize the robustness of our model by reaching more than 80 points on this benchmark, while SeaLLMs and Llama-3.1 have only a few cases where the performance exceeds 80 points. These results emphasize the robustness of our models by achieving the state-of-the-art with a model parameter less than 10B on SEA benchmarks, including both traditional classical NLP benchmark (SEACrowd-NLU) and modern LLM benchmark (SEA-HELM).", + "bbox": [ + 115, + 387, + 487, + 708 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "English performance. We also evaluate the performance of a widely used language, English, to observe a difference between the results of SEA and English. The Open LLM Leaderboard performance is shown in Table 5. Both Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT performed competitively in English language, math, and reasoning tasks, with Gemma-SEA-LION-9B-IT achieving the highest average score of 35.43. Moreover, we notice that the SEA models (Sailor and SeaLLMs) failed to perform on the English dataset. This might be because these models are optimized for SEA languages during supervised fine-tuning, and English", + "bbox": [ + 115, + 712, + 487, + 920 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "performance decreased as a result. In contrast, our models balance the performance between SEA and English knowledge, resulting in a high score for all benchmarks.", + "bbox": [ + 512, + 388, + 880, + 449 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3 Performance Analysis", + "text_level": 1, + "bbox": [ + 512, + 464, + 726, + 479 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this study, we discuss the performance improvement in each design decision of our models (Tables 3 and 5) as follows.", + "bbox": [ + 512, + 486, + 882, + 532 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Stage 1: English instruction fine tuning In Stage 1 IFT, the focus is predominantly on gaining general capabilities in math, code and general instruction following in the English language. Although our CPT models are based off of the instruct versions of Llama-3.1-8B, the CPT process has eroded the instruction following capabilities (See Table 5). We observe an increase of 3.86 and 9.72 for Stage-1-Llama and Stage-1-Gemma respectively in English instruction following capabilities on the IFEval benchmark. We also observe an average increase of 7.9 for Stage-1-Llama and 7.47 for Stage-1-Gemma for the SEA-HELM benchmark.", + "bbox": [ + 512, + 535, + 882, + 741 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Stage 2: Multilingual instruction fine tuning In Stage 2 IFT, the focus is on multilingual and reasoning capabilities. 
By instruction fine-tuning on SEA languages and higher-complexity English instruction pairs, the Stage 2 models saw an average increase of 8.73 for Stage-2-Llama and 10.1 for Stage-2-Gemma over the Stage 1 models on the SEA-HELM benchmark.", + "bbox": [ + 512, + 744, + 882, + 870 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Merge 1: Combining Stage 1 and Stage 2 Despite the significant gains observed in Stage 1 and 2, we observed that the effects of catastrophic for", + "bbox": [ + 512, + 873, + 882, + 920 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "getting from earlier stages could still be observed after Stage 2. In order to mitigate this, we merge the Stage 1 and Stage 2 models into the CPT model, after which we observed an average increase of 2.6 for Merge-1-Gemma. We also observed an increase across all SEA-HELM benchmark tasks for Merge-1-Llama.", + "bbox": [ + 112, + 84, + 487, + 195 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Merge 2: Incorporating instruct models To reintroduce the helpfulness, relevance, and informativeness of responses observed in Llama 3.1 and Gemma 2 models, we perform further merges of open-source instruct models. While we observed significant increases in MT-Bench benchmark scores for Vietnamese and Thai, we also observed a slight degradation of average SEA-HELM performance as well as a slight degradation of Indonesian MT-Bench scores, which we view as acceptable trade-offs for the significant performance increases in Vietnamese and Thai.", + "bbox": [ + 115, + 197, + 489, + 388 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Alignment steps In the alignment step to align the models to human preference, we prioritize the SEA MTBench performance over the other SEA-HELM benchmark tasks. We observed a broad increase in SEA MTBench performance across all languages for both models. However, this comes with minor degradation of instruction-following capabilities and overall Indonesian SEA-HELM performance. The alignment step encourages longer, more helpful and sensitive responses but hurts performance on task-specific benchmarks and instruction following in some languages – an issue we address in the next step.", + "bbox": [ + 115, + 390, + 489, + 598 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Final merge: Combining aligned models To compensate for the capability degradation in the previous steps, we merge Merge-2-Llama and Merge-1-Gemma with Aligned-SimPO-Llama and Aligned-SimPO-Gemma and the various open-source pre-trained models described in Sections 3.2.1 and 3.2.2 for their respective model families. For Llama-SEA-LION-8B-IT, we observed a significant increase in average SEA-HELM performance (61.84) from the alignment stage (51.30), mainly from the increase in performance for the core tasks in SEA-HELM. This performance increase demonstrates the value of empirically selecting the pre-trained models to be merged in, based on each model's strengths and weaknesses, to produce a far superior model. Gemma-SEA-LION-9B-IT easily achieves higher performance compared to Llama-SEA-LION-8B-IT with fewer post-training steps. 
We attribute this performance to the high performance of the base Gemma 2 model and also to the larger vocab", + "bbox": [ + 115, + 599, + 489, + 920 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ulary size which have been demonstrated (Takase et al., 2024) to produce better models.", + "bbox": [ + 509, + 84, + 880, + 116 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Related Works", + "text_level": 1, + "bbox": [ + 509, + 130, + 672, + 143 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Recently, researchers have proposed large language models that support multilingual settings. Llama (Dubey et al., 2024) is the prior effort to release an open-source large language model for the research community to develop their own models. Then, Qwen (Yang et al., 2024a) and Gemma (Rivière et al., 2024) introduced open-source LLMs that perform comparably or better than Llama with a larger amount of training data and many supported languages for these recent models. Massively multilingual open-source models like Bloom (Scao et al., 2022) and Aya (Ustun et al., 2024) also support a very wide range of languages, including some SEA languages. Although these models demonstrate a robust performance in English benchmarks, they mostly underperformed on SEA benchmarks that tested for SEA languages, SEA knowledge and cultural understanding (Lovenia et al., 2024; Susanto et al., 2025), presumably due to a lack of language support for certain SEA languages or cultures.", + "bbox": [ + 509, + 156, + 884, + 476 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In the SEA community, many works propose a large language model that is designed specifically for SEA languages by adding more SEA tokens in the training process, such as SeaLLMs (Nguyen et al., 2024) and Sailor (Sailor2 Team, 2024). However, the performance of these models is robust only on in-domain datasets or favors only some tasks (i.e., classical NLP datasets). This is because the design choice in the pre-training or fine-tuning of these models is not well studied, e.g., performing a single SFT step with low-quality datasets written in some SEA languages, resulting in a slight improvement on SEA benchmarks. To create a robust SEA LLM, we need to carefully balance language representation and design both pre-training and post-training (i.e., SFT, alignment, and model merging) for SEA contexts.", + "bbox": [ + 509, + 479, + 882, + 751 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 509, + 766, + 640, + 781 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Despite the sizable population and language diversity in Southeast Asia, there remains a scarcity of resources and accurate linguistic and cultural representation with open-source LLMs. In this paper, we introduce Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, two multilingual LLMs comprehensively trained to achieve state-of-the-art performances in SEA languages, based on the Llama and", + "bbox": [ + 509, + 793, + 884, + 920 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Gemma family of LLMs. SEA-LION represents the next advancement in the development of LLMs that explicitly supports SEA languages. Both models are fully open-source and available for commercial use to increase accessibility and innovation in multilingual LLMs in Southeast Asia. 
We will make our resources publicly available — including the dataset, training scripts, training checkpoints, and all fine-tuned models, even those that achieve state-of-the-art performance on the benchmarks — to establish solid baselines, ensure reproducibility, and support future research focused on culturally and professionally relevant SEA applications.", + "bbox": [ + 112, + 84, + 492, + 294 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Acknowledgment", + "text_level": 1, + "bbox": [ + 114, + 307, + 270, + 324 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This research is supported by the National Research Foundation, Singapore, under its National Large Language Models Funding Initiative. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not reflect the views of the National Research Foundation, Singapore.", + "bbox": [ + 112, + 332, + 489, + 447 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Limitation", + "text_level": 1, + "bbox": [ + 114, + 460, + 211, + 474 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Although we propose the state-of-the-art SEA LLMs, we found that the benchmark might not cover all the properties and languages we want to evaluate. For example, SEA-HELM is a robustness benchmark, but only covers four languages. SEACrowd is a benchmark that covers all SEA languages, but it is only classical NLP datasets (no chat or instruction following datasets). We require a more holistic SEA benchmark that covers LLM-specific tasks written in all SEA languages. However, with the current evaluation design choice, these benchmarks are the best design choice for current SEA research works.", + "bbox": [ + 112, + 486, + 489, + 694 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Moreover, we conduct experiments using only 8 and 9 billion parameter models. We argue that this is the most commonly used model size in real-world scenarios. In addition, our method can and should also work with a higher or smaller model size since our proposed technique does not rely on the model size, as we demonstrated by applying the SFT and alignment techniques on both Llama and Gemma models.", + "bbox": [ + 112, + 696, + 489, + 839 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 869, + 213, + 883 + ], + "page_idx": 8 + }, + { + "type": "ref_text", + "text": "SCB 10X, VISTEC, and SEACrowd. 2024. Thai llm leaderboard.", + "bbox": [ + 114, + 892, + 487, + 919 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "AI Singapore AI Products Team. 2024. Sea-helm.", + "AISG AI Singapore. 2023. Sea-lion-pile.", + "AISG AI Singapore. 2025. Sea-lion-pile-v2.", + "Anton Alexandrov, Veselin Raychev, Mark Niklas Mueller, Ce Zhang, Martin Vechev, and Kristina Toutanova. 2024. Mitigating catastrophic forgetting in language transfer via model merging. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 17167-17186, Miami, Florida, USA. Association for Computational Linguistics.", + "Arcee-AI. 2024. Llama-3.1-supernova-lite.", + "Adrien Barbaresi. 2021. Trafilatura: A Web Scraping Library and Command-Line Tool for Text Discovery and Extraction. 
In Proceedings of the Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: System Demonstrations, pages 122-131. Association for Computational Linguistics.", + "BAAI Beijing Academy of Artificial Intelligence. 2024. Infinity instruct.", + "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual.", + "CommonCrawl. 2024. Commoncrawl.", + "Ganqu Cui, Lifan Yuan, Ning Ding, Guanming Yao, Bingxiang He, Wei Zhu, Yuan Ni, Guotong Xie, Ruobing Xie, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2024. ULTRAFEEDBACK: boosting language models with scaled AI feedback. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net.", + "DAMO-NLP-SG. 2024. Seaexam.", + "Longxu Dou, Qian Liu, Guangtao Zeng, Jia Guo, Jiahui Zhou, Wei Lu, and Min Lin. 2024. *Sailor: Open language models for south-east asia. CoRR*, abs/2404.03608.", + "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang," + ], + "bbox": [ + 510, + 85, + 884, + 921 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, Aurélien Rodriguez, Austen Gregerson, Ava Spataru, Baptiste Rozière, Bethany Biron, Binh Tang, Bobbie Chern, Charlotte Caucheteux, Chaya Nayak, Chloe Bi, Chris Marra, Chris McConnell, Christian Keller, Christophe Touret, Chunyang Wu, Corinne Wong, Cristian Canton Ferrer, Cyrus Nikolaidis, Damien Alonsius, Daniel Song, Danielle Pintz, Danny Livshits, David Esiobu, Dhruv Choudhary, Dhruv Mahajan, Diego Garcia-Olano, Diego Perino, Dieuwke Hupkes, Egor Lakomkin, Ehab AlBadawy, Elina Lobanova, Emily Dinan, Eric Michael Smith, Filip Radenovic, Frank Zhang, Gabriel Synnaeve, Gabrielle Lee, Georgia Lewis Anderson, Graeme Nail, Grégoire Mialon, Guan Pang, Guillem Cucurell, Hailey Nguyen, Hannah Korevaar, Hu Xu, Hugo Touvron, Iliyan Zarov, Imanol Arrieta Ibarra, Isabel M. Kloumann, Ishan Misra, Ivan Evtimov, Jade Copet, Jaewon Lee, Jan Geffert, Jana Vranes, Jason Park, Jay Mahadeokar, Jeet Shah, Jelmer van der Linde, Jennifer Billock, Jenny Hong, Jenya Lee, Jeremy Fu, Jianfeng Chi, Jianyu Huang, Jiawen Liu, Jie Wang, Jiecao Yu, Joanna Bitton, Joe Spisak, Jongsoo Park, Joseph Rocca, Joshua Johnstun, Joshua Saxe, Junteng Jia, Kalyan Vasuden Alwala, Kartikeya Upasani, Kate Plawiak, Ke Li, Kenneth Heafield, Kevin Stone, and et al. 2024. The llama 3 herd of models. CoRR abs/2407.21783.", + "bbox": [ + 132, + 85, + 489, + 463 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Wikipedia Foundation. 2024. Wikipedia enterprise. 
HTML dumps downloads.", + "bbox": [ + 115, + 475, + 485, + 502 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Charles Goddard, Shamane Siriwardhana, Malikeh Ehghaghi, Luke Meyers, Vladimir Karpukhin, Brian Benedict, Mark McQuade, and Jacob Solawetz. 2024. Arcee's mergekit: A toolkit for merging large language models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: EMNLP 2024 - Industry Track, Miami, Florida, USA, November 12-16, 2024, pages 477-485. Association for Computational Linguistics.", + "bbox": [ + 115, + 514, + 489, + 633 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021. Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual.", + "bbox": [ + 115, + 645, + 487, + 736 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Pin-Lun Hsu, Yun Dai, Vignesh Kothapalli, Qingquan Song, Shao Tang, Siyu Zhu, Steven Shimizu, Shivam Sahni, Haowen Ning, and Yanning Chen. 2024. Liger kernel: Efficient Triton kernels for LLM training. arXiv preprint arXiv:2410.10989.", + "bbox": [ + 115, + 749, + 487, + 815 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Shengding Hu, Yuge Tu, Xu Han, Chaoqun He, Ganqu Cui, Xiang Long, Zhi Zheng, Yewei Fang, Yuxiang Huang, Weilin Zhao, Xinrong Zhang, Zhen Leng Thai, Kai Zhang, Chongyi Wang, Yuan Yao, Chenyang Zhao, Jie Zhou, Jie Cai, Zhongwu Zhai, Ning Ding, Chao Jia, Guoyang Zeng, Dahai Li, Zhiyuan Liu, and Maosong Sun. 2024. Minicpm: Un", + "bbox": [ + 115, + 827, + 489, + 920 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "veiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395.", + "bbox": [ + 529, + 85, + 880, + 112 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "HuggingFace. 2024. Open LLM leaderboard.", + "bbox": [ + 510, + 120, + 806, + 135 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Gabriel Ilharco, Marco Túlio Ribeiro, Mitchell Wortsman, Ludwig Schmidt, Hannaneh Hajishirzi, and Ali Farhadi. 2023. Editing models with task arithmetic. In *The Eleventh International Conference on Learning Representations*, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net.", + "bbox": [ + 510, + 143, + 884, + 222 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2017. Bag of tricks for efficient text classification. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 427-431. Association for Computational Linguistics.", + "bbox": [ + 510, + 230, + 884, + 322 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Wei Qi Leong, Jian Gang Ngui, Yosephine Susanto, Hamsawardhini Rengarajan, Kengatharayer Sarveswaran, and William-Chandra Tjhi. 2023. BHASA: A holistic southeast asian linguistic and cultural evaluation suite for large language models. CoRR, abs/2309.06085.", + "bbox": [ + 510, + 331, + 884, + 409 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. 
OpenReview.net.", + "bbox": [ + 510, + 419, + 884, + 483 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Holy Lovenia, Rahmad Mahendra, Salsabil Maulana Akbar, Lester James V. Miranda, Jennifer Santoso, Elyanah Aco, Akhdan Fadhilah, Jonibek Mansurov, Joseph Marvin Imperial, Onno Kampman, Joel Ruben Antony Moniz, Muhammad Ravi Shulthan Habibi, Frederikus Hudi, Jann Montalan, Ryan Hadiwijaya, Joanito Agili Lopo, William Nixon, Borje Karlsson, James Jaya, Ryandito Diandaru, Yuze Gao, Patrick Amadeus Irawan, Bin Wang, Jan Christian Blaise Cruz, Chenxi Whitehouse, Ivan Halim Parmonangan, Maria Khelli, Wenyu Zhang, Lucky Susanto, Reynard Adha Ryanda, Sonny Lazuardi Hermawan, Dan John Velasco, Muhammad Dehan Al Koutsar, Willy Fitra Hendria, Yasmin Moslem, Noah Flynn, Muhammad Farid Adilazuarda, Haochen Li, Johannes Lee, R. Damanhuri, Shuo Sun, Muhammad Reza Qorib, Amirbek Djanibekov, Wei Qi Leong, Quyet V. Do, Niklas Muennighoff, Tanrada Pansuwan, Ilham Firdausi Putra, Yan Xu, Ngee Tai Chia, Ayu Purwarianti, Sebastian Ruder, William-Chandra Tjhi, Peerat Limkonchotiwat, Alham Fikri Aji, Sedrick Keh, Genta Indra Winata, Ruochen Zhang, Fajri Koto, Zheng Xin Yong, and Samuel Cahyawijaya. 2024. Seacrowd: A multilingual multimodal data hub and benchmark suite for southeast asian languages. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami FL, USA, November 12-16, 2024, pages 5155-5203. Association for Computational Linguistics.", + "bbox": [ + 510, + 493, + 884, + 885 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Anton Lozhkov, Raymond Li, Loubna Ben Allal, Federico Cassano, Joel Lamy-Poirier, Nouamane Tazi,", + "bbox": [ + 510, + 892, + 884, + 920 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Ao Tang, Dmytro Pykhtar, Jiawei Liu, Yuxiang Wei, Tianyang Liu, Max Tian, Denis Kocetkov, Arthur Zucker, Younes Belkada, Zijian Wang, Qian Liu, Dmitry Abulkhanov, Indraneil Paul, Zhuang Li, Wen-Ding Li, Megan Risdal, Jia Li, Jian Zhu, Terry Yue Zhuo, Evgenii Zheltonozhskii, Nii Osae Osae Dade, Wenhao Yu, Lucas Krauß, Naman Jain, Yixuan Su, Xuanli He, Manan Dey, Edoardo Abati, Yekun Chai, Niklas Muennighoff, Xiangru Tang, Muhtasham Oblokulov, Christopher Akiki, Marc Marone, Cheng-hao Mou, Mayank Mishra, Alex Gu, Binyuan Hui, Tri Dao, Armel Zebaze, Olivier Dehaene, Nicolas Patry, Canwen Xu, Julian J. McAuley, Han Hu, Torsten Scholak, Sébastien Paquet, Jennifer Robinson, Carolyn Jane Anderson, Nicolas Chapados, and et al. 2024. Starcoder 2 and the stack v2: The next generation. CoRR, abs/2402.19173.", + "bbox": [ + 132, + 85, + 489, + 307 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Yu Meng, Mengzhou Xia, and Danqi Chen. 2024. Simpo: Simple preference optimization with a reference-free reward. CoRR, abs/2405.14734.", + "bbox": [ + 115, + 316, + 489, + 357 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Xuan-Phi Nguyen, Wenxuan Zhang, Xin Li, Mahani Aljunied, Zhiqiang Hu, Chenhui Shen, Yew Ken Chia, Xingxuan Li, Jianyu Wang, Qingyu Tan, Liying Cheng, Guanzheng Chen, Yue Deng, Sen Yang, Chaoqun Liu, Hang Zhang, and Lidong Bing. 2024. SeaLLMs - large language models for Southeast Asia. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pages 294-304, Bangkok, Thailand. Association for Computational Linguistics.", + "bbox": [ + 115, + 365, + 489, + 497 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "OpenAI. 2023. 
GPT-4 technical report. CoRR, abs/2303.08774.", + "bbox": [ + 115, + 506, + 487, + 532 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Guilherme Penedo, Hynek Kydlicek, Loubna Ben Allal, Anton Lozhkov, Margaret Mitchell, Colin Raffel, Leandro von Werra, and Thomas Wolf. 2024. The fine web datasets: Decanting the web for the finest text data at scale. CoRR, abs/2406.17557.", + "bbox": [ + 115, + 541, + 487, + 607 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Ivan Provilkov, Dmitrii Emelianenko, and Elena Voita. 2020. Bpe-dropout: Simple and effective subword regularization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, pages 1882-1892. Association for Computational Linguistics.", + "bbox": [ + 115, + 615, + 489, + 708 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Jeff Rasley, Samyam Rajbhandari, Olatunjri Ruwase, and Yuxiong He. 2020. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In KDD '20: The 26th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, Virtual Event, CA, USA, August 23-27, 2020, pages 3505-3506. ACM.", + "bbox": [ + 115, + 717, + 489, + 810 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2023. GPQA: A graduate-level google-proof q&a benchmark. CoRR, abs/2311.12022.", + "bbox": [ + 115, + 818, + 489, + 883 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Morgane Rivière, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard", + "bbox": [ + 115, + 892, + 487, + 921 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, Johan Ferret, Peter Liu, Pouya Tafti, Abe Friesen, Michelle Casbon, Sabela Ramos, Ravin Kumar, Charline Le Lan, Sammy Jerome, Anton Tsitsulin, Nino Vieillard, Piotr Stanczyk, Sertan Girgin, Nikola Momchev, Matt Hoffman, Shantanu Thakoor, Jean-Bastien Grill, Behnam Neyshabur, Olivier Bachem, Alanna Walton, Aliaksei Severyn, Alicia Parrish, Aliya Ahmad, Allen Hutchison, Alvin Abdagic, Amanda Carl, Amy Shen, Andy Brock, Andy Coenen, Anthony Laforge, Antonia Paterson, Ben Bastian, Bilal Piot, Bo Wu, Brandon Royal, Charlie Chen, Chintu Kumar, Chris Perry, Chris Welty, Christopher A. Choquette-Choo, Danila Sinopalnikov, David Weinberger, Dimple Vijaykumar, Dominika Rogozinska, Dustin Herbison, Elisa Bandy, Emma Wang, Eric Noland, Erica Moreira, Evan Senter, Evgenii Eltsyshev, Francesco Visin, Gabriel Rasskin, Gary Wei, Glenn Cameron, Gus Martins, Hadi Hashemi, Hanna Klimczak-Plucinska, Harleen Batra, Harsh Dhand, Ivan Nardini, Jacinda Mein, Jack Zhou, James Svensson, Jeff Stanway, Jetha Chan, Jin Peng Zhou, Joana Carrasqueira, Joana Iljazi, Jocelyn Becker, Joe Fernandez, Joost van Amersfoort, Josh Gordon, Josh Lipschultz, Josh Newlan, Ju-yeong Ji, Kareem Mohamed, Kartikeya Badola, Kat Black, Katie Millican, Keelin McDonell, Kelvin Nguyen, Kiranbir Sodhia, Kish Greene, Lars Lowe Sjösund, Lauren Usui, Laurent Sifre, Lena Heuermann, Leticia Lago, and Lilly McNealus. 2024. Gemma 2: Improving open language models at a practical size. CoRR, abs/2408.00118.", + "bbox": [ + 526, + 85, + 884, + 502 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Sailor2 Team. 2024. 
Sailor2: Sailing in south-east asia with inclusive multilingual llm.", + "bbox": [ + 509, + 512, + 882, + 539 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, Matthias Galle, Jonathan Tow, Alexander M. Rush, Stella Biderman, Albert Webson, Pawan Sasanka Ammanamanchi, Thomas Wang, Benoit Sagot, Niklas Muennighoff, Albert Villanova del Moral, Olatunj Ruwase, Rachel Bawden, Stas Bekman, Angelina McMillan-Major, Iz Beltagy, Huu Nguyen, Lucile Saulnier, Samson Tan, Pedro Ortiz Suarez, Victor Sanh, Hugo Laurençon, Yacine Jernite, Julien Launay, Margaret Mitchell, Colin Raffel, Aaron Gokaslan, Adi Simhi, Aitor Soroa, Alham Fikri Aji, Amit Alfassy, Anna Rogers, Ariel Kreisberg Nitzav, Canwen Xu, Chenghao Mou, Chris Emezue, Christopher Klamm, Colin Leong, Daniel van Strien, David Ifeoluwa Adelani, and et al. 2022. BLOOM: A 176b-parameter open-access multilingual language model. CoRR, abs/2211.05100.", + "bbox": [ + 510, + 546, + 882, + 796 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "AI Singapore SEA-LION Team. 2024. Llama3 8b cpt sea-lionv2.1 instruct.", + "bbox": [ + 509, + 804, + 882, + 832 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Luca Soldaini, Rodney Kinney, Akshita Bhagia, Dustin Schwenk, David Atkinson, Russell Authur, Ben Bogin, Khyathi Chandu, Jennifer Dumas, Yanai Elazar, Valentin Hofmann, Ananya Jha, Sachin Kumar, Li Lucy, Xinxi Lyu, Nathan Lambert, Ian Magnusson, Jacob Morrison, Niklas Muennighoff,", + "bbox": [ + 509, + 841, + 882, + 921 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Aakanksha Naik, Crystal Nam, Matthew Peters, Abhilasha Ravichander, Kyle Richardson, Zejiang Shen, Emma Strubell, Nishant Subramani, Oyvind Tafjord, Evan Walsh, Luke Zettlemoyer, Noah Smith, Hannaneh Hajishirzi, Iz Beltagy, Dirk Groeneveld, Jesse Dodge, and Kyle Lo. 2024. Dolma: an open corpus of three trillion tokens for language model pretraining research. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15725-15788, Bangkok, Thailand. Association for Computational Linguistics.", + "Zayne Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. 2024. Musr: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.", + "Yosephine Susanto, Adithya Venkatadri Hulagadri, Jann Railey Montalan, Jian Gang Ngui, Xian Bin Yong, Weiqi Leong, Hamsawardhini Rengarajan, Peerat Limkonchotiwat, Yifan Mai, and William Chandra Tjhi. 2025. Sea-helm: Southeast asian holistic evaluation of language models. Preprint, arXiv:2502.14301.", + "Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V. Le, Ed H. Chi, Denny Zhou, and Jason Wei. 2023. Challenging big-bench tasks and whether chain-of-thought can solve them. In *Findings of the Association for Computational Linguistics: ACL* 2023, Toronto, Canada, July 9-14, 2023, pages 13003-13051. Association for Computational Linguistics.", + "Sho Takase, Ryokan Ri, Shun Kiyono, and Takuya Kato. 2024. Large vocabulary size improves large language models. CoRR, abs/2406.16508.", + "Sailor Team. 2024. 
Sailor2: Sailing in south-east asia with inclusive multilingual llms.", + "The Mosaic ML Team. 2021. composer. https://github.com/mosaicml/composer/.", + "The Mosaic ML Team. 2022. Llm foundry. https://github.com/mosaicml/llm-foundry.", + "Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. 2024. Openmathinstruct-2: Accelerating AI for math with massive open-source instruction data. CoRR, abs/2410.01560.", + "Ahmet Üstün, Viraat Aryabumi, Zheng Xin Yong, WeiYin Ko, Daniel D'souza, Gbemileke Onilude, Neel Bhandari, Shivalika Singh, Hui-Lee Ooi, Amr Kayid, Freddie Vargus, Phil Blunsom, Shayne Longpre, Niklas Muennighoff, Marzieh Fadaee, Julia Kreutzer, and Sara Hooker. 2024. Aya model: An instruction finetuned open-access multilingual language model. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August" + ], + "bbox": [ + 115, + 85, + 489, + 920 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "11-16, 2024, pages 15894-15939. Association for Computational Linguistics.", + "Bin Wang, Zhengyuan Liu, Xin Huang, Fangkai Jiao, Yang Ding, AiTi Aw, and Nancy Chen. 2024a. Sealeval for multilingual foundation models: From crosslingual alignment to cultural reasoning. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pages 370-390. Association for Computational Linguistics.", + "Ke Wang, Nikolaos Dimitriadis, Guillermo Ortiz-Jiménez, François Fleuret, and Pascal Frossard. 2024b. Localizing task information for improved model merging and compression. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net.", + "Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. 2024c. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. CoRR, abs/2406.01574.", + "Chris Wendler, Veniamin Veselovsky, Giovanni Monea, and Robert West. 2024. Do llamas work in english? on the latent language of multilingual transformers. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pages 15366-15394. Association for Computational Linguistics.", + "Mitchell Wortsman, Peter J. Liu, Lechao Xiao, Katie E. Everett, Alexander A. Alemi, Ben Adlam, John D. Co-Reyes, Izzeddin Gur, Abhishek Kumar, Roman Novak, Jeffrey Pennington, Jascha Sohl-Dickstein, Kelvin Xu, Jaehoon Lee, Justin Gilmer, and Simon Kornblith. 2024. Small-scale proxies for large-scale transformer training instabilities. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. Open-Review.net.", + "Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuchen Lin. 2024. Magpie: Alignment data synthesis from scratch by prompting aligned llms with nothing. 
CoRR, abs/2406.08464.", + "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jianxin Yang, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize" + ], + "bbox": [ + 510, + 85, + 882, + 920 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Xuejing Liu, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, Zhifang Guo, and Zhihao Fan. 2024a. Qwen2 technical report. CoRR, abs/2407.10671.", + "Ziyi Yang, Fanqi Wan, Longguang Zhong, Tianyuan Shi, and Xiaojun Quan. 2024b. Weighted-reward preference optimization for implicit model fusion. CoRR, abs/2412.03187.", + "Wei Jie Yeo, Teddy Ferdinan, Przemyslaw Kazienko, Ranjan Satapathy, and Erik Cambria. 2024. Self-training large language models through knowledge detection. In *Findings of the Association for Computational Linguistics: EMNLP* 2024, Miami, Florida, USA, November 12-16, 2024, pages 15033-15045. Association for Computational Linguistics.", + "Le Yu, Bowen Yu, Haiyang Yu, Fei Huang, and Yongbin Li. 2024. Language models are super mario: Absorbing abilities from homologous models as a free lunch. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net.", + "Wenxuan Zhang, Hou Pong Chan, Yiran Zhao, Mahani Aljunied, Jianyu Wang, Chaoqun Liu, Yue Deng, Zhiqiang Hu, Weiwen Xu, Yew Ken Chia, Xin Li, and Lidong Bing. 2024a. Seallms 3: Open foundation and chat multilingual large language models for southeast asian languages. CoRR, abs/2407.19672.", + "Xulang Zhang, Rui Mao, and Erik Cambria. 2024b. Multilingual emotion recognition: Discovering the variations of lexical semantics between languages. In International Joint Conference on Neural Networks, IJCNN 2024, Yokohama, Japan, June 30 - July 5, 2024, pages 1-9. IEEE.", + "Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, Alban Desmaison, Can Balioglu, Pritam Damania, Bernard Nguyen, Geeta Chauhan, Yuchen Hao, Ajit Mathews, and Shen Li. 2023. Pytorch FSDP: experiences on scaling fully sharded data parallel. Proc. VLDB Endow., 16(12):3848-3860.", + "Wenzhen Zheng, Wenbo Pan, Xu Xu, Libo Qin, Li Yue, and Ming Zhou. 2024a. Breaking language barriers: Cross-lingual continual pre-training at scale. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pages 7725-7738. Association for Computational Linguistics.", + "Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024b. Llamafactory: Unified efficient fine-tuning of $100+$ language models. In Proceedings of the" + ], + "bbox": [ + 115, + 85, + 489, + 920 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand. 
Association for Computational Linguistics.", + "Chengzhi Zhong, Fei Cheng, Qianying Liu, Junfeng Jiang, Zhen Wan, Chenhui Chu, Yugo Murawaki, and Sadao Kurohashi. 2024. Beyond english-centric llms: What language do multilingual language models think in? CoRR, abs/2408.10811.", + "Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. 2023. Instruction-following evaluation for large language models. CoRR, abs/2311.07911." + ], + "bbox": [ + 510, + 85, + 882, + 277 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Appendix", + "text_level": 1, + "bbox": [ + 114, + 84, + 238, + 99 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 Continued pre-training (CPT) data", + "text_level": 1, + "bbox": [ + 112, + 109, + 440, + 124 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Existing data: We utilize existing datasets as shown in Table 6 (HuggingFace Datasets).", + "bbox": [ + 112, + 130, + 766, + 145 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Other data: As shown in Table 6 (the other data section), the listed datasets contain data from a diverse range of domains, including news, books, articles, poems, etc.", + "bbox": [ + 112, + 146, + 880, + 178 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/d1fb51f7d5203701b1c1be87c57a86727b27c69725ee0cf2689b1ea2cdbdd506.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Continued Pre-training Data
Source (HuggingFace Datasets)LanguagesSize (Billions of Tokens)
bigcode/the-stack-v2-dedupCODE40
allenai/dolmaEN37.5
HuggingFaceFW/fineweb-eduEN7.5
aisingapore/SEA-PILE-v1SEA47.58
aisingapore/SEA-PILE-v2ID7
Source (Others)LanguagesSize (Billions of Tokens)
VinBigDataVI16
WangChanBERTaTH8.5
Others - ENEN5
Others - SEASEA30.92
", + "bbox": [ + 295, + 189, + 702, + 329 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 6: List of datasets for the continued pre-training stage.", + "bbox": [ + 292, + 338, + 700, + 354 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2 Stage 1 IFT data", + "text_level": 1, + "bbox": [ + 114, + 380, + 295, + 395 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/55f574138c852425fa5e8074e6ad64c6e4c0ac613ddfd8d9c24174ce9df33553.jpg", + "table_caption": [ + "Stage 1 IFT Datasets" + ], + "table_footnote": [], + "table_body": "
Source (HuggingFace Datasets)LanguagesSize
BAAI/Infinity-InstructEN7,449,106
nvidia/OpenMathInstruct-2EN2,000,000
", + "bbox": [ + 334, + 423, + 662, + 461 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 7: List of datasets for Stage-1-IFT. For BAAI/Infinity-Instruct dataset, any conversation that originally ended with a user turn has had that last turn removed.", + "bbox": [ + 112, + 470, + 882, + 498 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.3 Stage 2 IFT data", + "text_level": 1, + "bbox": [ + 114, + 526, + 295, + 542 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Existing data: We utilize existing datasets as shown in Table 9 (HuggingFace Datasets).", + "bbox": [ + 112, + 546, + 766, + 562 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Synthetic data: As shown in Table 9 (the generated part), we describe how to formulate synthetic data as follows", + "bbox": [ + 112, + 563, + 880, + 592 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- qwen_gemma_synthetic datasets are generated first in English with Qwen 32B, utilizing an approach similar to Magpie. Instructions are then translated into the target language with Gemma 2 27B.", + "- Llama_gemma_synthetic datasets are generated first in English with Llama 3.1 70B, utilizing an approach similar to Magpie (Xu et al., 2024). Instructions are then translated into the target language with Gemma 2 27B.", + "- gemma_synthetic datasets are generated directly with Gemma 2 27B using Magpie (Xu et al., 2024).", + "- sea_multilingual_systemchat is a synthetic dataset translated with Gemma 2 27B from the English systemchat dataset.", + "- rewritten_oasst is a dataset rewritten with Gemma 2 27B based on the English OASST dataset.", + "- rewritten_helpsteer is a dataset rewritten with Gemma 2 27B based on the English Helpsteer dataset." + ], + "bbox": [ + 134, + 595, + 880, + 755 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.4 Helpfulness and preference alignment data", + "text_level": 1, + "bbox": [ + 112, + 766, + 500, + 782 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As shown in Table 8, we use the princeton-nlp/gemma2-ultrafeedback-armorm as the source of the alignment data. We then further re-scored with the reward model, nvidia/Llama-3.1-Nemotron-70B-Reward to create the SEA version. In particular, generated-gemma2-27b-seapref-nemotron-70b takes prompts from seald, wangchan_thainstruct, and additional hand-written Southeast Asian cultural prompts collected from native speakers and then generates responses (with a varying temperature) from them with Gemma 2 27B. The responses are then scored with nvidia/Llama-3.1-Nemotron-70B-Reward, with the top-scoring response selected as chosen and vice versa, similar to princeton-nlp/gemma2-ultrafeedback-armorm.", + "bbox": [ + 112, + 788, + 884, + 915 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/dd8f45e05294968ff296d594ea86737670808790006a8a27a017ff220a05095b.jpg", + "table_caption": [ + "Preference Data" + ], + "table_footnote": [], + "table_body": "
Source (HuggingFace Datasets)LanguagesSize
princeton-nlp/gemma2-ultrafeedback-armormEN61,510
Source (Generated)LanguagesSize
generated-gemma2-27b-seapref-nemotron-70bSEA5,511
", + "bbox": [ + 310, + 142, + 690, + 191 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/db3a7dde14ce0b341033213c154d6a9aa45d69bffbec2d1963b8cbfc70cff39a.jpg", + "table_caption": [ + "Table 8: List of preference datasets used for the alignment stage.", + "Stage 2 IFT Datasets" + ], + "table_footnote": [], + "table_body": "
Source (HuggingFace Datasets)LanguagesSize
BAAI/Infinity-Instruct*EN1,456,927
HuggingFaceTB/smoltalkEN409,537
allenai/tulu-3-sft-personas-mathEN149,960
parinzee/seed-free-synthetic-instruct-thai-v1TH118,898
HuggingFaceTB/smoltalkEN96,356
HuggingFaceTB/smoltalkEN83,144
arcee-ai/EvolKit-75KEN74,174
AI-MO/NuminaMath-TIREN72,441
Post-training-Data-Flywheel/AutoIF-instruct-61kEN61,492
argilla/ifeval-like-dataEN56,339
HuggingFaceTB/smoltalkEN53,342
ai2-adapt-dev/tulu_v3.9_wildjailbreak_decontaminated_50kEN50,000
ai2-adapt-dev/tulu_v3.9_synthetic_finalresp_wildguardmixtrain_decontaminated_50kEN50,000
allenai/tulu-3-sft-personas-math-gradeEN49,980
allenai/tulu-3-sft-personas-codeEN34,999
HuggingFaceTB/smoltalkEN34,424
allenai/tulu-3-sft-personas-instruction-followingEN29,980
airesearch/WangchanThaiInstructTH25,014
allenai/tulu-3-sft-personas-algebraEN20,000
arcee-ai/EvolKit-20k-viVI15,378
allenai/coconotEN10,983
ai2-adapt-dev/tulu_v3.9_scirff_10kEN10,000
Source (Generated)LanguagesSize
qwen_gemma_synthetic_tamilTA480,000
qwen_gemma_synthetic_thaiTH480,000
qwen_gemma_synthetic_indonesianID465,019
qwen_gemma_synthetic_vietnameseVI465,019
gemma_synthetic_indonesianID458,149
gemma_synthetic_filipinoTL455,093
gemma_synthetic_vietVI291,576
gemma_synthetic_tamilTA276,314
gemma_synthetic_thaiTH186,339
gemma_synthetic_javaneseJV110,000
gemma_synthetic_sudaneseSU110,000
llama_gemma_synthetic_thaiTH88,920
llama_gemma_synthetic_tamilTA88,920
llama_gemma_synthetic_vietnameseVI88,920
llama_gemma_synthetic_javaneseJV88,920
llama_gemma_synthetic_indonesianID88,920
llama_gemma_synthetic_filipinoTL80,000
enrich_27kSEA27,463
seaMultilingual_systemchatSEA1,903
rewritten_oasstSEA841
rewritten_helpsteerSEA838
", + "bbox": [ + 201, + 332, + 796, + 843 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 9: List of datasets for Stage-2-IFT.", + "bbox": [ + 359, + 853, + 636, + 866 + ], + "page_idx": 14 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_model.json b/data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_model.json new file mode 100644 index 0000000000000000000000000000000000000000..406c6942e81eebaef161dbebed1111ed13b908df --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_model.json @@ -0,0 +1,2507 @@ +[ + [ + { + "type": "image", + "bbox": [ + 0.165, + 0.083, + 0.231, + 0.128 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.239, + 0.096, + 0.829, + 0.117 + ], + "angle": 0, + "content": "SEA-LION: Southeast Asian Languages in One Network" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.128, + 0.871, + 0.294 + ], + "angle": 0, + "content": "Raymond Ng\\*, Thanh Ngan Nguyen\\*, Yuli Huang\\*, Ngee Chia Tai\\*, Wai Yi Leong\\*, Wei Qi Leong\\*, Xianbin Yong\\*, Jian Gang Ngui\\*, Yosephine Susanto\\*, Nicholas Cheng\\*, Hamsawardhini Rengarajan\\*, Peerat Limkonchotiwat\\*, Adithya Venkatadri Hulagadri\\*, Kok Wai Teng\\*, Yeo Yeow Tong\\*, Bryan Siow\\*, Wei Yi Teo\\*, Wayne Lau\\*, Choon Meng Tan\\*, Brandon Ong\\*, Zhi Hao Ong\\*, Jann Railey Montalan\\*, Adwin Chan\\*, Sajeban Antonyrex\\*, Ren Lee\\*, Esther Choa\\*, David Ong Tat-Wee\\*, Bing Jie Darius Liu\\*, William Chandra Tjhi\\*, Erik Cambria\\*, Leslie Teo\\* AI Singapore, National University of Singapore \\*Nanyang Technological University https://sea-lion.ai" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.31, + 0.341, + 0.324 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.334, + 0.461, + 0.69 + ], + "angle": 0, + "content": "Recently, Large Language Models (LLMs) have dominated much of the artificial intelligence scene with their ability to process and generate natural languages. However, the majority of LLM research and development remains English-centric, leaving low-resource languages such as those in the Southeast Asian (SEA) region underrepresented. To address this representation gap, we introduce Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, two cutting-edge multilingual LLMs designed for SEA languages. The SEA-LION family of LLMs supports 11 SEA languages, namely English, Chinese, Indonesian, Vietnamese, Malay, Thai, Burmese, Lao, Filipino, Tamil, and Khmer. Our work leverages large-scale multilingual continued pre-training with a comprehensive post-training regime involving multiple stages of instruction fine-tuning, alignment, and model merging. Evaluation results on multilingual benchmarks show that our models achieve state-of-the-art performance across LLMs supporting SEA languages. We open-source the models to benefit the wider SEA community." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.698, + 0.26, + 0.713 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.723, + 0.49, + 0.901 + ], + "angle": 0, + "content": "Large language models (LLMs) have significantly transformed the field of natural language processing, achieving remarkable performance in text generation, summarization and sentiment analysis (Brown et al., 2020; OpenAI, 2023; Dubey et al., 2024; Rivière et al., 2024; Zhang et al., 2024b; Yeo et al., 2024). 
Despite their impressive capabilities, most LLMs remain heavily English-centric (Wendler et al., 2024; Zhong et al., 2024). Unfortunately, this situation has led LLMs in regions with many under-represented languages such" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.31, + 0.885, + 0.422 + ], + "angle": 0, + "content": "as Southeast Asia (SEA) to suffer. Languages with lower resources, such as Filipino, Lao, Burmese and Khmer in the SEA region, are not supported by many open-source English-centric LLMs. This underscores the need to bridge the resource and representation gap between English and SEA languages." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.426, + 0.885, + 0.763 + ], + "angle": 0, + "content": "Recently, there have been many attempts to create multilingual LLMs in an open-source manner, e.g., BLOOM (Scao et al., 2022), a project aimed at increasing multilingual presence in opensource LLMs by supporting 46 languages. Popular LLM families such as Llama (Dubey et al., 2024), Gemma (Rivière et al., 2024) and Qwen (Yang et al., 2024a) have also introduced multilingual LLMs for their latest iteration. During our evaluations, we found that the performance of these models is acceptable in the general case, i.e., when considering evaluation benchmarks formulated from English datasets. However, we observe that the performance degrades on SEA-specific benchmarks. Moreover, researchers have also introduced LLMs such as SeaLLMs (Nguyen et al., 2024; Zhang et al., 2024a) and Sailor (Dou et al., 2024) to specifically address the LLM gap in SEA languages. However, the performance of these models is less than ideal for languages such as Thai or Tamil\\(^{2}\\) (10X et al., 2024; AI Products Team, 2024)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.767, + 0.885, + 0.879 + ], + "angle": 0, + "content": "In this paper, we address the issues by proposing a robust open-source Southeast Asian model with data transparency for reproducibility, namely SEA-LION - a family of LLMs continued pretrained (CPT) and fine-tuned on Llama-3.1-8B-Instruct for Llama-SEA-LION-8B-IT and Gemma2-9B for Gemma-SEA-LION-9B-IT with a focus" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.907, + 0.332, + 0.92 + ], + "angle": 0, + "content": "\\(^{1}\\)SEA-LION Models Collection" + }, + { + "type": "page_footnote", + "bbox": [ + 0.509, + 0.895, + 0.883, + 0.922 + ], + "angle": 0, + "content": "2Tamil is one of the official languages in Singapore. It is also spoken in other areas in the SEA region, such as Malaysia." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.291, + 0.061, + 0.706 + ], + "angle": 270, + "content": "arXiv:2504.05747v4 [cs.CL] 30 Oct 2025" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.325 + ], + "angle": 0, + "content": "on SEA languages. To tackle the performance problem, we utilize 200 billion English, code, and SEA languages tokens as well as 16.8 million English and SEA languages instruction and answer pairs for CPT and post-training steps, respectively, to achieve a significant improvement in SEA languages. In order to allow our models to be used by everyone without restrictions, we release our models under the fully open MIT license. We benchmark our models against the SEA-HELM(Susanto et al., 2025) and Open LLM Leaderboard3 with other LLMs of similar sizes in Southeast Asia like Sailor 2 (Team, 2024) and SeaLLMs 3 (Zhang et al., 2024a), where our models achieve state-of-the-art performances." 
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.327, + 0.489, + 0.358 + ], + "angle": 0, + "content": "We summarize the contribution of our paper as follows." + }, + { + "type": "text", + "bbox": [ + 0.12, + 0.36, + 0.49, + 0.424 + ], + "angle": 0, + "content": "- We released two LLMs, Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, that are meticulously trained to accurately represent the unique linguistic diversity of SEA languages." + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.425, + 0.488, + 0.472 + ], + "angle": 0, + "content": "- We also provide in-depth insights in this paper into our end-to-end training workflow to benefit the community developing multilingual LLMs." + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.473, + 0.49, + 0.634 + ], + "angle": 0, + "content": "- We present a reproducible dataset development process, covering sourcing and the model training process. We release our training artifacts, including the training dataset, training scripts, training checkpoints, and fine-tuned models, including Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, to provide strong baselines, promote reproducibility, and enable future research on applications that require SEA-specific knowledge4." + }, + { + "type": "list", + "bbox": [ + 0.12, + 0.36, + 0.49, + 0.634 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.665, + 0.411, + 0.683 + ], + "angle": 0, + "content": "2 Continued pre-training (CPT)" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.693, + 0.298, + 0.708 + ], + "angle": 0, + "content": "2.1 Pre-training data" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.714, + 0.49, + 0.859 + ], + "angle": 0, + "content": "The CPT data consists of a curated set of English, multilingual, and code corpora from several open source repositories like Dolma (Soldaini et al., 2024), FineWeb (Penedo et al., 2024), the-stackv2 (Lozhkov et al., 2024), SEA-LION-Pile (AI Singapore, 2023), SEA-LION-Pilev2 (AI Singapore, 2025), as well as documents from CommonCrawl (CommonCrawl, 2024) and from the public domain, such as Wikipedia (Foun" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.886, + 0.182 + ], + "angle": 0, + "content": "dation, 2024). For SEA-LION-Pilev2, we filter CommonCrawl WARC data for documents in SEA languages (i.e., Burmese, Simplified Chinese, Indonesian, Khmer, Lao, Malay, Filipino, Tamil, Thai, and Vietnamese) using the pretrained fast-text language classifier (Joulin et al., 2017)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.193, + 0.886, + 0.419 + ], + "angle": 0, + "content": "A document is retained if the language code reported in its metadata matches that of one of the aforementioned SEA languages. Additionally, we further clean up the data with Trafilatura (Barbaresi, 2021). To determine the optimal dataset ratio between SEA languages, code, and English for the CPT process, we conduct a series of small-scale CPT experiments, each with a training budget of 10 billion tokens and varying proportions of English, code, and SEA language data. We settled on an optimal data mix ratio of \\(55\\%\\) SEA languages, \\(25\\%\\) English, and \\(20\\%\\) code tokens for a budget of 200 billion tokens. For a detailed breakdown of the token count by languages, please refer to Table 6." 
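To make the language-filtering step in Section 2.1 concrete, the sketch below combines the pretrained fastText language identifier (Joulin et al., 2017) with Trafilatura-based extraction. It is a minimal illustration rather than the production pipeline: lid.176.bin is the publicly released fastText identifier, while the SEA_LANGS code set and the 0.5 confidence threshold are assumptions made for this example.

```python
# Minimal sketch of the SEA-language document filter from Section 2.1.
# Assumptions: HTML has already been pulled out of CommonCrawl WARC records,
# lid.176.bin is the public pretrained fastText language identifier, and the
# 0.5 confidence threshold is illustrative, not the authors' setting.
import fasttext
import trafilatura

# Language codes for the ten SEA languages listed in Section 2.1.
SEA_LANGS = {"my", "zh", "id", "km", "lo", "ms", "tl", "ta", "th", "vi"}

lid_model = fasttext.load_model("lid.176.bin")

def keep_document(html: str, metadata_lang: str, threshold: float = 0.5):
    """Return cleaned text if the document is confidently in a SEA language
    that matches the language code reported in its metadata, else None."""
    text = trafilatura.extract(html)  # main-text extraction / boilerplate removal
    if not text:
        return None
    # fastText expects a single line of text for prediction.
    labels, probs = lid_model.predict(text.replace("\n", " "), k=1)
    lang = labels[0].replace("__label__", "")
    if lang in SEA_LANGS and lang == metadata_lang and probs[0] >= threshold:
        return text
    return None
```

As in the retention rule described above, a document survives only when the classifier's prediction agrees with the language code in the crawl metadata.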
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.466, + 0.657, + 0.483 + ], + "angle": 0, + "content": "2.2 CPT process" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.509, + 0.884, + 0.556 + ], + "angle": 0, + "content": "Model selection. For the models to CPT from, we choose Llama-3.1-8B-Instruct (Dubey et al., 2024) and Gemma-2-9B (Rivière et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.568, + 0.886, + 0.923 + ], + "angle": 0, + "content": "Training setup. Following previous works (Dou et al., 2024), we use BPE-Dropout (Provilkov et al., 2020) to increase the performance and robustness of the training. We use a Warmup-Stable-Decay (WSD) (Hu et al., 2024) scheduler with warm-up and cooldown phases each representing \\(10\\%\\) of the entire training budget. We use the AdamW (Loshchilov and Hutter, 2019) optimizer with the maximum learning rate (LR) set to \\(1e^{-5}\\) and the final LR after cooldown is \\(1e^{-7}\\). Following Wortsman et al. (2024), we set epsilon to \\(1e^{-15}\\). We use Composer (Team, 2021) and LLM Foundry (Team, 2022) for distributed training using Fully Sharded Data Parallel (Zhao et al., 2023) on a cluster of eight nodes of the p5.48xlarge instance from Amazon Web Services (AWS). The total training duration was approximately 6 days and 10 days for the Llama 3.1 and Gemma 2 models, respectively. In this paper, we refer to the post-CPT models as Llama-SEA-LION-8B and Gemma-SEA-LION-9B for the Llama 3.1 and Gemma 2 continued pre-trained models, respectively." + }, + { + "type": "page_footnote", + "bbox": [ + 0.136, + 0.87, + 0.295, + 0.884 + ], + "angle": 0, + "content": "3Open LLM Leaderboard" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.884, + 0.488, + 0.92 + ], + "angle": 0, + "content": "4Please visit https://huggingface.co/aisingapore for all artifacts in this paper, including training data and other versions of SEA-LION" + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.87, + 0.488, + 0.92 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.264, + 0.101 + ], + "angle": 0, + "content": "3 Post-training" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.12, + 0.303, + 0.135 + ], + "angle": 0, + "content": "3.1 Post-training data" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.148, + 0.488, + 0.213 + ], + "angle": 0, + "content": "The post-training data consists of 3 subsets of data for Stage 1 IFT, Stage 2 IFT, and the Preference dataset for alignment, respectively. We describe the training data information of each step as follows." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.216, + 0.488, + 0.345 + ], + "angle": 0, + "content": "Stage 1 IFT. In this step, we employ Infinity-Instruct [Foundation and Chat] (Beijing Academy of Artificial Intelligence, 2024) and OpenMath-Instruct 2 (Toshniwal et al., 2024) to improve the mathematical, reasoning, and coding skills of the instruction model. The full details of the training data are shown in Appendix 7." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.349, + 0.488, + 0.509 + ], + "angle": 0, + "content": "Stage 2 IFT. Then, in this step, we use generalized large-scale instructions on the previous instruction model. In particular, we employ 22 existing datasets (written in English, Thai, and Vietnamese) and formulate new 22 synthetic datasets using various models and techniques to create SEA instruction datasets (see Appendix A.3 for the full data generation details). 
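To picture the CPT schedule from Section 2.2 before moving on, here is a minimal sketch of the Warmup-Stable-Decay learning-rate curve. The peak LR of 1e-5, final LR of 1e-7, and the 10% warm-up and cool-down fractions come from the setup above; the linear ramp shape is an assumption for illustration.

```python
# Minimal sketch of the Warmup-Stable-Decay (WSD) schedule from Section 2.2:
# 10% of steps warm up to the peak LR, 80% hold it stable, and the final 10%
# cool down to the end LR. Linear ramps are an illustrative assumption.
def wsd_lr(step: int, total_steps: int,
           peak_lr: float = 1e-5, end_lr: float = 1e-7) -> float:
    warmup_end = int(0.1 * total_steps)
    cooldown_start = int(0.9 * total_steps)
    if step < warmup_end:                       # warm-up phase
        return peak_lr * (step + 1) / warmup_end
    if step < cooldown_start:                   # stable phase
        return peak_lr
    frac = (step - cooldown_start) / (total_steps - cooldown_start)
    return peak_lr + frac * (end_lr - peak_lr)  # cool-down phase

# The paper pairs this schedule with AdamW using epsilon = 1e-15
# (following Wortsman et al., 2024).
```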
As shown in Table 9, we use a total of 7,298,828 instruction samples that cover 11 languages."
  },
  {
    "type": "text",
    "bbox": [
      0.114,
      0.514,
      0.489,
      0.626
    ],
    "angle": 0,
    "content": "Helpfulness and preference alignment. We also conduct alignment learning on top of the instruction model using a feedback dataset called UltraFeedback (Cui et al., 2024). In addition, we synthesized a SEA version of UltraFeedback, generating responses with Gemma 2 27B and scoring them with the Llama-3.1-Nemotron-70B reward model; see Appendix A.4 for the full details."
  },
  {
    "type": "image",
    "bbox": [
      0.116,
      0.645,
      0.482,
      0.827
    ],
    "angle": 0,
    "content": null
  },
  {
    "type": "image_caption",
    "bbox": [
      0.113,
      0.837,
      0.49,
      0.91
    ],
    "angle": 0,
    "content": "Figure 1: Training process of Llama-SEA-LION-8B-IT (Section 3.2.1). The post-training process consists of two stages of instruction fine-tuning, an alignment stage, and multiple merge stages. Dotted lines denote a merge stage and solid lines denote an alignment stage."
  },
  {
    "type": "title",
    "bbox": [
      0.51,
      0.085,
      0.722,
      0.101
    ],
    "angle": 0,
    "content": "3.2 Post-training process"
  },
  {
    "type": "text",
    "bbox": [
      0.508,
      0.106,
      0.884,
      0.283
    ],
    "angle": 0,
    "content": "We use LLaMaFactory (Zheng et al., 2024b) with DeepSpeed (Rasley et al., 2020) for all Instruction Fine-Tuning (IFT) and alignment steps. All IFT stages are performed using full-model fine-tuning, starting from the models produced in the previous step (Section 2.2) together with existing models. We use MergeKit (Goddard et al., 2024) with a value of 1 for the weight and density parameters in all merge steps. Models are selected for merging empirically, based on the openness of their licenses, their suitability for merging, and their performance."
  },
  {
    "type": "title",
    "bbox": [
      0.509,
      0.291,
      0.771,
      0.306
    ],
    "angle": 0,
    "content": "3.2.1 Llama-SEA-LION-8B-IT"
  },
  {
    "type": "text",
    "bbox": [
      0.508,
      0.31,
      0.884,
      0.469
    ],
    "angle": 0,
    "content": "Stage 1 IFT As shown in Figure 1, we started the post-training phase with IFT of Llama-SEA-LION-8B on the Infinity-Instruct (Foundation) (Beijing Academy of Artificial Intelligence, 2024) and OpenMathInstruct-2 (Toshniwal et al., 2024) datasets. Together, the two datasets contain approximately 9.5 million instruction pairs, primarily in English and centered around reasoning, math, and code. We refer to the model at this stage as Stage-1-Llama."
  },
  {
    "type": "text",
    "bbox": [
      0.508,
      0.471,
      0.884,
      0.663
    ],
    "angle": 0,
    "content": "Stage 2 IFT We performed a second round of IFT using the SEA-Instruct dataset, which consists of approximately 7.3 million instruction pairs, of which 5 million are generated with the Gemma-2-27B-Instruct (Rivière et al., 2024) and Qwen2.5-32B-Instruct (Yang et al., 2024a) models in SEA languages. The remainder are English-language instruction pairs from the Infinity-Instruct (Chat) (Beijing Academy of Artificial Intelligence, 2024) dataset. We refer to the model at this stage as Stage-2-Llama."
  },
  {
    "type": "text",
    "bbox": [
      0.508,
      0.664,
      0.883,
      0.761
    ],
    "angle": 0,
    "content": "First merge After finishing the IFT stages, we performed the first of a series of merges by merging Stage-1-Llama and Stage-2-Llama into Llama-SEA-LION-8B using the DARE TIES (Yu et al., 2024; Ilharco et al., 2023) method. We refer to the model at this stage as Merge-1-Llama."
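As a concrete rendering of this first merge, the sketch below writes a MergeKit configuration for DARE TIES with the weight and density values of 1 stated in Section 3.2 and invokes the mergekit-yaml CLI. The local checkpoint paths, output directory, and bfloat16 dtype are placeholders for illustration; the authors' exact options are not published here.

```python
# Sketch of the first merge in Section 3.2.1: DARE TIES with weight = 1 and
# density = 1 (Section 3.2), folding Stage-1-Llama and Stage-2-Llama into the
# CPT model. Paths and dtype are placeholders, not the authors' settings.
import subprocess

CONFIG = """\
merge_method: dare_ties
base_model: ./Llama-SEA-LION-8B
models:
  - model: ./Stage-1-Llama
    parameters: {weight: 1, density: 1}
  - model: ./Stage-2-Llama
    parameters: {weight: 1, density: 1}
dtype: bfloat16
"""

with open("dare_ties_merge.yaml", "w") as f:
    f.write(CONFIG)

# MergeKit materializes the merged checkpoint in the output directory.
subprocess.run(["mergekit-yaml", "dare_ties_merge.yaml", "./Merge-1-Llama"],
               check=True)
```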
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.761, + 0.884, + 0.922 + ], + "angle": 0, + "content": "Second merge In order to mitigate catastrophic forgetting due to the fine-tuning process (Alexandrov et al., 2024), we performed the second round of merging by merging top-performing instruction-tuned models that share the Llama 3.1 lineage. We merge the original Llama-3.1-8B-Instruct, Llama3-8B-SEA-LION-v2.1-Instruct (SEA-LION Team, 2024), and SuperNova-Lite (Arcee-AI, 2024) into Merge-1-Llama using the Consensus TA (Wang et al., 2024b; Ilharco et al., 2023) merge method." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.116 + ], + "angle": 0, + "content": "We refer to the model at this stage as Merge-2-Llama." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.118, + 0.49, + 0.199 + ], + "angle": 0, + "content": "Helpfulness and preference alignment We performed one round of alignment on Merge-2-Llama using SimPO (Meng et al., 2024) with the SEA-Preference dataset. We refer to the model at this stage as Aligned-SimPO-Llama." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.2, + 0.491, + 0.295 + ], + "angle": 0, + "content": "Final merge Lastly, we perform a merge using the DELLA-Linear merge. With the original Llama3.1-8B-Instruct model as the base for merging, we merge in Merge-2-Llama and Aligned-SIMPO-Llama to produce the final model, Llama-SEA-LION-v3-9B-IT." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.326, + 0.386, + 0.34 + ], + "angle": 0, + "content": "3.2.2 Gemma-SEA-LION-9B-IT" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.356, + 0.486, + 0.539 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.549, + 0.49, + 0.621 + ], + "angle": 0, + "content": "Figure 2: Training process of Gemma-SEA-LION-9B-IT (Section 3.2.2). The post-training process comprises two stages of instruction fine-tuning, an alignment stage, and multiple merge stages. Dotted lines denote a merge stage and solid lines denote an alignment stage." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.632, + 0.49, + 0.728 + ], + "angle": 0, + "content": "Stage 1 and Stage 2 IFT Similar to the Llama-SEA-LION-8B-IT, we started off the post-training phase with both stages of IFT using the same datasets on the Gemma-2-9B model (Rivière et al., 2024). We refer to both models at stage 1 and stage 2 as Stage-1-Gemma and Stage-2-Gemma, respectively." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.729, + 0.49, + 0.793 + ], + "angle": 0, + "content": "First merge We merge the Gemma-2-9B-IT (Rivière et al., 2024) and Stage-2-Gemma into Gemma-2-9B using the DELLA Linear method. We refer to the model at this stage as the Merge-1-Gemma." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.794, + 0.49, + 0.873 + ], + "angle": 0, + "content": "Helpfulness and preference alignment Using the Merge-1-Gemma as the base model, we performed one round of alignment using SimPO with the SEA-Preference dataset. We refer to the model at this stage as the Aligned-SimPO-Gemma." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.874, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Final merge Finally, using the Gemma-2-9B model as the base model, we merged Merge-1-Gemma, FuseChat Gemma-2-9B-Instruct (Yang" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.133 + ], + "angle": 0, + "content": "et al., 2024b), Gemma-SEA-LION-9B, and Aligned-SimPO-Gemma into it to produce the final model Gemma-SEA-LION-9B-IT." 
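Since both model families rely on the same alignment objective, a compact sketch is useful here. The snippet below is a minimal PyTorch rendering of the reference-free SimPO loss (Meng et al., 2024) used in the alignment rounds above; the beta and gamma values are illustrative defaults rather than the SEA-LION training settings.

```python
# Minimal sketch of the SimPO objective (Meng et al., 2024) used for the
# alignment rounds in Sections 3.2.1 and 3.2.2. Inputs are the summed token
# log-probabilities of the chosen and rejected responses under the policy;
# beta and gamma are illustrative values, not the authors' settings.
import torch
import torch.nn.functional as F

def simpo_loss(chosen_logps: torch.Tensor,    # sum of log-probs, chosen response
               rejected_logps: torch.Tensor,  # sum of log-probs, rejected response
               chosen_len: torch.Tensor,
               rejected_len: torch.Tensor,
               beta: float = 2.0,
               gamma: float = 1.0) -> torch.Tensor:
    # Length-normalized implicit rewards: no reference model is required,
    # which is what makes SimPO reference-free.
    r_chosen = beta * chosen_logps / chosen_len
    r_rejected = beta * rejected_logps / rejected_len
    # Bradley-Terry-style loss with a target reward margin gamma.
    return -F.logsigmoid(r_chosen - r_rejected - gamma).mean()
```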
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.147, + 0.64, + 0.161 + ], + "angle": 0, + "content": "3.3 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.169, + 0.885, + 0.282 + ], + "angle": 0, + "content": "This post-training workflow emphasizes the careful balance between general capabilities, SEA-specific linguistic fluency, and natural conversational abilities. Each step in the workflow is designed to progressively refine the model, ensuring it meets the diverse needs of users in the Southeast Asian region." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.283, + 0.885, + 0.396 + ], + "angle": 0, + "content": "The entire post-training process for Gemma-SEA-LION-9B-IT and Llama-SEA-LION-8B-IT took approximately 1350 and 1024 GPU hours, respectively, on eight H100 GPUs. To make the training efficient, all post-training steps utilize Liger Kernel (Hsu et al., 2024) for substantial memory savings of approximately \\(60\\%\\)." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.41, + 0.719, + 0.427 + ], + "angle": 0, + "content": "4 Experimental Setup" + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.438, + 0.724, + 0.454 + ], + "angle": 0, + "content": "4.1 Competitive methods" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.46, + 0.885, + 0.589 + ], + "angle": 0, + "content": "For the evaluation, we compared our models against well-known LLMs for multilingual and SEA languages, such as SeALMsv3 (Zhang et al., 2024a), Sailorv2 (Team, 2024), Qwen 2.5 (Yang et al., 2024a), Gemma 2 (Riviere et al., 2024) and Llama 3.1 (Dubey et al., 2024), where the parameters of those models are less than 10 billion parameters, similar to our models." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.603, + 0.746, + 0.617 + ], + "angle": 0, + "content": "4.2 Evaluation Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.626, + 0.884, + 0.672 + ], + "angle": 0, + "content": "To evaluate the robustness of our proposed models, we compare our models to competitors in three benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.675, + 0.885, + 0.884 + ], + "angle": 0, + "content": "SEA Benchmarks. We evaluated the multilingual performance of each LLM using the SEA-HELM Leaderboard (Leong et al., 2023; Susanto et al., 2025) \\(^{5}\\). We selected SEA-HELM because the design choice of this benchmark reflects the performance of SEA culture and knowledge the most compared with other existing benchmarks (DAMO-NLP-SG, 2024; Lovenia et al., 2024; Wang et al., 2024a). We also evaluate on a wide-range SEA coverage language benchmark called SEACrowd (Lovenia et al., 2024). This benchmark consists of all SEA languages for natural language understanding and generation datasets." + }, + { + "type": "page_footnote", + "bbox": [ + 0.509, + 0.895, + 0.884, + 0.922 + ], + "angle": 0, + "content": "5Please visit https://leaderboard.sea-lion.ai/ for live score update of SEA-LION." + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.445, + 0.084, + 0.552, + 0.098 + ], + "angle": 0, + "content": "SEA-HELM" + }, + { + "type": "table", + "bbox": [ + 0.121, + 0.1, + 0.878, + 0.259 + ], + "angle": 0, + "content": "
NLU, NLG, NLR, NLIInstruction Following
ModelsAverageIDVITHTAIDVITH
Meta-Llama-3.1-8B35.3742.3340.6735.1338.8816.1919.059.00
SeaLLMs-v3-7B37.0444.7948.2943.5327.4526.6735.2426.00
Gemma-2-9B41.4847.6543.2842.0053.264.763.8110.00
Qwen2.5-7B41.9851.6352.1746.5536.6031.4336.1930.00
Sailor2-8B42.6253.2347.3346.6445.0430.4830.4835.00
Llama-SEA-LION-8B41.4244.9846.2542.7943.0325.7132.3823.00
Gemma-SEA-LION-9B48.6757.1649.3947.1660.5625.7120.0027.00
" + }, + { + "type": "table_caption", + "bbox": [ + 0.123, + 0.266, + 0.876, + 0.297 + ], + "angle": 0, + "content": "Table 1: SEA-HELM multilingual benchmark on NLU, NLG, NLR, NLI and instruction following on base and continued pre-trained models of similar sizes." + }, + { + "type": "table_caption", + "bbox": [ + 0.409, + 0.311, + 0.589, + 0.324 + ], + "angle": 0, + "content": "Open LLM Leaderboard" + }, + { + "type": "table", + "bbox": [ + 0.121, + 0.324, + 0.878, + 0.443 + ], + "angle": 0, + "content": "
ModelsAverageMMLU-PROBBHGPQAMATH Lvl 5IFEval (EN)MUSR
Meta-Llama-3.1-8B13.924.9525.296.325.1412.78.98
Sailor2-8B17.7125.7427.624.877.0221.9519.03
Gemma-2-9B21.1534.4834.110.5113.1420.414.3
SeaLLMs-v3-7B24.0035.7134.579.2818.8132.9412.68
Qwen2.5-7B24.9937.3935.819.9618.8833.7414.14
Llama-SEA-LION-8B16.6127.626.047.499.8916.5612.07
Gemma-SEA-LION-9B22.4132.7837.2410.299.8930.1214.11
" + }, + { + "type": "table_caption", + "bbox": [ + 0.131, + 0.452, + 0.863, + 0.468 + ], + "angle": 0, + "content": "Table 2: Open LLM Leaderboard benchmarks across different continued pre-trained models of similar sizes." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.494, + 0.49, + 0.573 + ], + "angle": 0, + "content": "However, due to maintenance reasons, we cannot reproduce the NLG benchmark of SEACrowd. Therefore, we experiment only with the NLU benchmark (zero-shot), which has 131 data subsets, 7 tasks, and 31 SEA indigenous languages." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.574, + 0.49, + 0.816 + ], + "angle": 0, + "content": "English performance. We also evaluated the English performance of the models using the Open LLM Leaderboard (HuggingFace, 2024). This is because English is also widely used in SEA countries. Therefore, we need to evaluate the understanding and knowledge of LLMs in the English benchmark as well. The leaderboard consists of six benchmarks, IFEval (Zhou et al., 2023), Big Bench Hard (Suzgun et al., 2023), MATH (Hendrycks et al., 2021), GPQA (Rein et al., 2023), MuSR (Sprague et al., 2024) and MMLUPRO (Wang et al., 2024c). Moreover, we also evaluate the CPT models on SEA-HELM and the Open LLM Leaderboard since these benchmarks support the CPT evaluation." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.83, + 0.336, + 0.846 + ], + "angle": 0, + "content": "5 Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.857, + 0.49, + 0.922 + ], + "angle": 0, + "content": "To understand the robustness and generalization of our proposed models, we conduct three studies as follows. Section 5.1 evaluates the robustness of continual pre-training models using SEA-HELM" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.493, + 0.884, + 0.573 + ], + "angle": 0, + "content": "and the Open LLM leaderboard. In Section 5.2, we compare our instruction fine-tuning models with competitors in three benchmarks to demonstrate the generalization of our models. Lastly, we discuss the design choice of our models in Section 5.3." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.591, + 0.807, + 0.607 + ], + "angle": 0, + "content": "5.1 Continued Pre-Training Results" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.616, + 0.885, + 0.922 + ], + "angle": 0, + "content": "SEA performance. The CPT stage is primarily focused on gaining SEA language capabilities and knowledge. For the purpose of comparison against base and CPT models, as shown in Table 1, we observed a 6.05 and 7.19 average SEA-HELM performance increase over the Meta-Llama-3.1-8B and Gemma-2-9B for Llama-SEA-LION-8B and Gemma-SEA-LION-9B, respectively. We observed a much larger average increase with instruction following capabilities in particular, which we attribute to the fact that our CPT models are trained from the instruction models rather than from the base models. Moreover, in the average performance, we found that our Gemma-SEA-LION-9B models perform the best compared to other models. This emphasizes a strong reason to perform CPT for improving the performance of SEA languages, rather than skipping the CPT and performing SFT directly." + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.457, + 0.084, + 0.542, + 0.094 + ], + "angle": 0, + "content": "SEA-HELM" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.096, + 0.88, + 0.338 + ], + "angle": 0, + "content": "
NLU, NLG, NLR, NLIInstruction FollowingMTBench
ModelsAverageIDVITHTAIDVITHIDVITH
SeaLLMs-v3-7B-Chat39.1942.7248.5042.5912.0657.1453.3347.0059.8165.2456.59
Llama-3.1-8B-Instruct41.4851.5051.3145.3215.4077.1475.2463.0056.3857.5954.34
Sailor2-8B-Chat43.1348.9848.0145.4428.2949.5245.7140.0069.7666.9773.94
Qwen2.5-7B-Instruct44.5860.2853.4653.4321.0381.9069.5266.0065.6666.8068.71
Gemma-2-9B-IT55.3364.0459.8657.2252.2888.5778.1071.0068.7868.3773.51
Stage-1-Llama50.7651.8451.8346.2327.5369.5273.3359.0042.7446.4146.46
Stage-2-Llama59.4953.8755.1850.9244.8077.1476.1967.0050.9053.7246.97
Merge-1-Llama59.3656.7356.8251.7146.6381.9082.8667.0057.0454.0150.28
Merge-2-Llama58.0159.1952.6351.8935.4087.6280.9578.0056.3859.3258.86
Aligned-SimPO-Llama51.3054.8651.6946.7726.4082.8680.0068.0068.2064.6864.92
Llama-SEA-LION-8B-IT61.8460.5061.4855.9243.6184.7685.7176.0062.6568.3265.13
Stage-1-Gemma56.5655.0654.5151.9642.7466.6774.2961.0047.3547.2655.05
Stage-2-Gemma66.6664.1061.7656.9057.8589.5282.8676.0060.5458.9358.76
Merge-1-Gemma69.2666.2564.9559.7460.4189.5291.4382.0066.4564.4765.00
Aligned-SimPO-Gemma69.3765.6965.4759.5157.3886.6788.5778.0068.8973.6773.51
Gemma-SEA-LION-9B-IT69.3566.2664.9359.2358.8294.2988.5778.0065.8573.2769.07
" + }, + { + "type": "table_caption", + "bbox": [ + 0.115, + 0.348, + 0.882, + 0.376 + ], + "angle": 0, + "content": "Table 3: SEA-HELM multilingual benchmark on NLU, NLG, NLR, NLI, instruction following and multi-turn chat on instruct models of similar sizes." + }, + { + "type": "image", + "bbox": [ + 0.122, + 0.392, + 0.688, + 0.542 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.142, + 0.555, + 0.663, + 0.571 + ], + "angle": 0, + "content": "Figure 3: Zero-shot model performance across NLU tasks in SEA languages." + }, + { + "type": "table", + "bbox": [ + 0.704, + 0.416, + 0.88, + 0.511 + ], + "angle": 0, + "content": "
ModelNLU Score
SeaLLMs-v3-7B-Chat52.68
Llama-3.1-8B-Instruct49.94
Sailor2-8B-Chat60.21
Qwen2.5-7B-Instruct54.51
Gemma-2-9B-IT60.21
Llama-SEA-LION-8B-IT55.10
Gemma-SEA-LION-9B-IT64.13
" + }, + { + "type": "table_caption", + "bbox": [ + 0.701, + 0.52, + 0.884, + 0.577 + ], + "angle": 0, + "content": "Table 4: The average NLU performance across 131 data subsets and 31 indigenous languages." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.596, + 0.49, + 0.803 + ], + "angle": 0, + "content": "English performance. For the English performance, as shown in Table 2, both CPT models also managed to perform competitively against the Meta-Llama-3.1-8B and Gemma-2-9B base models on the Open LLM Leaderboard benchmarks. This indicates that our choice of retraining with a proportion of \\(25\\%\\) English tokens has been beneficial in mitigating catastrophic forgetting, which has been shown to stem from CPT (Zheng et al., 2024a). Although our CPT models perform lower than Qwen and SeaLLMs on this benchmark, we outperform them on the SEA language instead, which is the main focus of this work." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.818, + 0.407, + 0.833 + ], + "angle": 0, + "content": "5.2 Instruction Fine-tuning Results" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.841, + 0.489, + 0.887 + ], + "angle": 0, + "content": "In this study, we compare our models with competitors on SEA-HELM, SEACrowd, and the Open LLM Leaderboard as follows." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.89, + 0.49, + 0.922 + ], + "angle": 0, + "content": "SEA-HELM. As shown in Table 3, the SEA-HELM benchmark performance demonstrates that" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.595, + 0.885, + 0.901 + ], + "angle": 0, + "content": "our instruct models, Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, attain competitive performance in SEA languages, with Gemma-SEA-LION-9B-IT achieving one of the highest average performances. Moreover, we significantly improve the performance of Llama-3.1-8B-Instruct from 41.48 to 61.84 using Llama-SEA-LION-8B-IT, while Gemma-SEA-LION-9B-IT achieves 14.02 improvement points compared to Gemma-2-9B-IT. Both Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT outperform other SEA languages-focused LLMs, such as *Sailor2-8B-Chat* and *SEALLMs-v3-7B-Chat*, with an average score of 69.35 across all the languages covered by the SEAHELM benchmark, apart from the SEA-MTbench tasks. This conforms with the previous results on the CPT models (Section 5.1) that our CPT model performs the best on SEA languages, resulting in the best performer in this experiment." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.906, + 0.883, + 0.922 + ], + "angle": 0, + "content": "SEACrowd. Other than evaluating on some SEA" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.412, + 0.084, + 0.586, + 0.096 + ], + "angle": 0, + "content": "Open LLM Leaderboard" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.096, + 0.878, + 0.338 + ], + "angle": 0, + "content": "
ModelsAverageMMLU-PROBBHGPQAMATH Lvl 5IFEval (EN)MUSR
Sailor2-8B-Chat16.3727.9327.153.470.0037.492.19
SeaLLMs-v3-7B-Chat22.4933.9324.377.2715.8644.109.38
Llama-3.1-8B-Instruct27.8829.3626.1010.6317.4577.036.75
Qwen2.5-7B-Instruct27.9337.0034.7210.180.0076.349.34
Gemma-2-9B-IT28.8631.9542.1414.770.2374.369.74
Stage-1-Llama24.5125.8726.327.8319.2662.894.88
Stage-2-Llama27.7528.1024.647.7219.5678.787.74
Merge-1-Llama27.4927.4726.228.2819.7976.167.04
Merge-2-Llama29.9629.9228.789.9619.9482.618.54
Aligned-SimPO-Llama30.5830.8434.318.3926.5975.767.61
Llama-SEA-LION-8B-IT30.3931.0129.4710.4022.5880.358.54
Stage-1-Gemma29.8833.3438.5110.7424.1756.8715.66
Stage-2-Gemma33.4834.6736.0611.7420.7783.0014.61
Merge-1-Gemma35.1536.2241.4215.3226.2882.099.59
Aligned-SimPO-Gemma35.3137.6542.3814.9927.7980.238.82
Gemma-SEA-LION-9B-IT35.4336.9443.3915.1024.2481.8511.07
" + }, + { + "type": "table_caption", + "bbox": [ + 0.181, + 0.348, + 0.816, + 0.362 + ], + "angle": 0, + "content": "Table 5: Open LLM Leaderboard benchmarks across different instruct models of similar sizes." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.388, + 0.488, + 0.709 + ], + "angle": 0, + "content": "languages like SEA-HELM, we also evaluated our model compared to competitors on 31 SEA indigenous languages using SEACrowd-NLU. Note that, for this study, we use only the best settings of our models from the previous experiment (Table 3). As shown in Table 4, we observe a state-of-the-art result from Gemma-SEA-LION-9B-IT by achieving 64.13 points on the NLU benchmark, while Llama-SEA-LION-8B-IT improves its baseline from 49.94 to 55.10 points. Moreover, the results from Figure 3 also emphasize the robustness of our model by reaching more than 80 points on this benchmark, while SeaLLMs and Llama-3.1 have only a few cases where the performance exceeds 80 points. These results emphasize the robustness of our models by achieving the state-of-the-art with a model parameter less than 10B on SEA benchmarks, including both traditional classical NLP benchmark (SEACrowd-NLU) and modern LLM benchmark (SEA-HELM)." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.713, + 0.488, + 0.921 + ], + "angle": 0, + "content": "English performance. We also evaluate the performance of a widely used language, English, to observe a difference between the results of SEA and English. The Open LLM Leaderboard performance is shown in Table 5. Both Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT performed competitively in English language, math, and reasoning tasks, with Gemma-SEA-LION-9B-IT achieving the highest average score of 35.43. Moreover, we notice that the SEA models (Sailor and SeaLLMs) failed to perform on the English dataset. This might be because these models are optimized for SEA languages during supervised fine-tuning, and English" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.389, + 0.882, + 0.45 + ], + "angle": 0, + "content": "performance decreased as a result. In contrast, our models balance the performance between SEA and English knowledge, resulting in a high score for all benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.466, + 0.727, + 0.48 + ], + "angle": 0, + "content": "5.3 Performance Analysis" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.487, + 0.884, + 0.533 + ], + "angle": 0, + "content": "In this study, we discuss the performance improvement in each design decision of our models (Tables 3 and 5) as follows." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.536, + 0.884, + 0.743 + ], + "angle": 0, + "content": "Stage 1: English instruction fine tuning In Stage 1 IFT, the focus is predominantly on gaining general capabilities in math, code and general instruction following in the English language. Although our CPT models are based off of the instruct versions of Llama-3.1-8B, the CPT process has eroded the instruction following capabilities (See Table 5). We observe an increase of 3.86 and 9.72 for Stage-1-Llama and Stage-1-Gemma respectively in English instruction following capabilities on the IFEval benchmark. We also observe an average increase of 7.9 for Stage-1-Llama and 7.47 for Stage-1-Gemma for the SEA-HELM benchmark." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.745, + 0.884, + 0.871 + ], + "angle": 0, + "content": "Stage 2: Multilingual instruction fine tuning In Stage 2 IFT, the focus is on multilingual and reasoning capabilities. 
By instruction fine-tuning on SEA languages and higher-complexity English instruction pairs, the Stage 2 models saw an average increase of 8.73 for Stage-2-Llama and 10.1 for Stage-2-Gemma over the Stage 1 models on the SEA-HELM benchmark."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.513,
+ 0.874,
+ 0.884,
+ 0.921
+ ],
+ "angle": 0,
+ "content": "Merge 1: Combining Stage 1 and Stage 2 Despite the significant gains in Stages 1 and 2, the effects of catastrophic"
+ }
+ ],
+ [
+ {
+ "type": "text",
+ "bbox": [
+ 0.113,
+ 0.085,
+ 0.488,
+ 0.196
+ ],
+ "angle": 0,
+ "content": "forgetting from earlier stages could still be observed after Stage 2. To mitigate this, we merge the Stage 1 and Stage 2 models into the CPT model, after which we observed an average increase of 2.6 for Merge-1-Gemma. We also observed an increase across all SEA-HELM benchmark tasks for Merge-1-Llama."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.117,
+ 0.198,
+ 0.49,
+ 0.389
+ ],
+ "angle": 0,
+ "content": "Merge 2: Incorporating instruct models To reintroduce the helpfulness, relevance, and informativeness of responses observed in the Llama 3.1 and Gemma 2 models, we perform further merges with open-source instruct models. While we observed significant increases in MT-Bench scores for Vietnamese and Thai, we also observed slight degradations in average SEA-HELM performance and in Indonesian MT-Bench scores, which we view as acceptable trade-offs for the significant gains in Vietnamese and Thai."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.117,
+ 0.391,
+ 0.49,
+ 0.599
+ ],
+ "angle": 0,
+ "content": "Alignment steps In the alignment step, which aligns the models to human preferences, we prioritize SEA MT-Bench performance over the other SEA-HELM benchmark tasks. We observed a broad increase in SEA MT-Bench performance across all languages for both models. However, this comes with minor degradation of instruction-following capabilities and of overall Indonesian SEA-HELM performance. The alignment step encourages longer, more helpful, and more sensitive responses but hurts performance on task-specific benchmarks and instruction following in some languages, an issue we address in the next step."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.117,
+ 0.6,
+ 0.49,
+ 0.921
+ ],
+ "angle": 0,
+ "content": "Final merge: Combining aligned models To compensate for the capability degradation in the previous steps, we merge Merge-2-Llama and Merge-1-Gemma with Aligned-SimPO-Llama and Aligned-SimPO-Gemma and various open-source pre-trained models described in Sections 3.2.1 and 3.2.2 for their respective model families. For Llama-SEA-LION-8B-IT, we observed a significant increase in average SEA-HELM performance (61.84) over the alignment stage (51.30), mainly from improved performance on the core SEA-HELM tasks. This increase demonstrates the value of empirically selecting the pre-trained models to merge, based on each model's strengths and weaknesses, to produce a far superior model. Gemma-SEA-LION-9B-IT achieves higher performance than Llama-SEA-LION-8B-IT with fewer post-training steps. We attribute this to the strength of the base Gemma 2 model and also to its larger vocabulary"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.51,
+ 0.085,
+ 0.882,
+ 0.117
+ ],
+ "angle": 0,
+ "content": "size, which has been demonstrated (Takase et al., 2024) to produce better models."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.51,
+ 0.131,
+ 0.673,
+ 0.145
+ ],
+ "angle": 0,
+ "content": "6 Related Works"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.51,
+ 0.158,
+ 0.885,
+ 0.478
+ ],
+ "angle": 0,
+ "content": "Recently, researchers have proposed large language models that support multilingual settings. Llama (Dubey et al., 2024) was a pioneering effort to release an open-source large language model that the research community could build upon. Qwen (Yang et al., 2024a) and Gemma (Rivière et al., 2024) subsequently introduced open-source LLMs that perform comparably to or better than Llama, trained on larger amounts of data and supporting more languages. Massively multilingual open-source models like Bloom (Scao et al., 2022) and Aya (Üstün et al., 2024) also support a very wide range of languages, including some SEA languages. Although these models demonstrate robust performance on English benchmarks, they mostly underperform on SEA benchmarks that test SEA languages, SEA knowledge, and cultural understanding (Lovenia et al., 2024; Susanto et al., 2025), presumably due to a lack of support for certain SEA languages or cultures."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.51,
+ 0.48,
+ 0.884,
+ 0.752
+ ],
+ "angle": 0,
+ "content": "In the SEA community, many works propose large language models designed specifically for SEA languages by adding more SEA tokens to the training process, such as SeaLLMs (Nguyen et al., 2024) and Sailor (Sailor2 Team, 2024). However, the performance of these models is robust only on in-domain datasets or favors only some tasks (e.g., classical NLP datasets). This is because the design choices in the pre-training and fine-tuning of these models are not well studied; for example, performing a single SFT step with low-quality datasets written in a few SEA languages yields only slight improvements on SEA benchmarks. To create a robust SEA LLM, we need to carefully balance language representation and design both pre-training and post-training (i.e., SFT, alignment, and model merging) for SEA contexts."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.51,
+ 0.767,
+ 0.642,
+ 0.782
+ ],
+ "angle": 0,
+ "content": "7 Conclusion"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.51,
+ 0.794,
+ 0.885,
+ 0.921
+ ],
+ "angle": 0,
+ "content": "Despite the sizable population and language diversity of Southeast Asia, there remains a scarcity of resources and of accurate linguistic and cultural representation in open-source LLMs. In this paper, we introduce Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, two multilingual LLMs comprehensively trained to achieve state-of-the-art performance in SEA languages, based on the Llama and"
+ }
+ ],
+ [
+ {
+ "type": "text",
+ "bbox": [
+ 0.113,
+ 0.085,
+ 0.493,
+ 0.295
+ ],
+ "angle": 0,
+ "content": "Gemma family of LLMs. SEA-LION represents the next advancement in the development of LLMs that explicitly support SEA languages. Both models are fully open-source and available for commercial use to increase accessibility and innovation in multilingual LLMs in Southeast Asia. We will make our resources publicly available, including the dataset, training scripts, training checkpoints, and all fine-tuned models, even those that achieve state-of-the-art performance on the benchmarks, to establish solid baselines, ensure reproducibility, and support future research focused on culturally and professionally relevant SEA applications."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.115,
+ 0.308,
+ 0.272,
+ 0.325
+ ],
+ "angle": 0,
+ "content": "Acknowledgment"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.113,
+ 0.334,
+ 0.49,
+ 0.448
+ ],
+ "angle": 0,
+ "content": "This research is supported by the National Research Foundation, Singapore, under its National Large Language Models Funding Initiative. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not reflect the views of the National Research Foundation, Singapore."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.115,
+ 0.461,
+ 0.212,
+ 0.475
+ ],
+ "angle": 0,
+ "content": "Limitation"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.113,
+ 0.487,
+ 0.49,
+ 0.695
+ ],
+ "angle": 0,
+ "content": "Although we propose state-of-the-art SEA LLMs, we find that existing benchmarks do not cover all the properties and languages we want to evaluate. For example, SEA-HELM is a robust benchmark but covers only four languages, while SEACrowd covers all SEA languages but contains only classical NLP datasets (no chat or instruction-following datasets). A more holistic SEA benchmark that covers LLM-specific tasks written in all SEA languages is still needed. Nevertheless, given the current evaluation landscape, these benchmarks are the best available choices for SEA research."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.113,
+ 0.697,
+ 0.49,
+ 0.84
+ ],
+ "angle": 0,
+ "content": "Moreover, we conduct experiments using only 8- and 9-billion-parameter models. We argue that these are the most commonly used model sizes in real-world scenarios. In addition, our method should also work at larger or smaller scales, since the proposed techniques do not depend on model size, as demonstrated by applying the same SFT and alignment recipe to both the Llama and Gemma models."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.115,
+ 0.87,
+ 0.214,
+ 0.884
+ ],
+ "angle": 0,
+ "content": "References"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.115,
+ 0.894,
+ 0.488,
+ 0.92
+ ],
+ "angle": 0,
+ "content": "SCB 10X, VISTEC, and SEACrowd. 2024. Thai LLM Leaderboard."
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.511,
+ 0.086,
+ 0.851,
+ 0.1
+ ],
+ "angle": 0,
+ "content": "AI Singapore AI Products Team. 2024. SEA-HELM."
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.511,
+ 0.11,
+ 0.791,
+ 0.124
+ ],
+ "angle": 0,
+ "content": "AISG AI Singapore. 2023. SEA-LION-Pile."
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.511,
+ 0.134,
+ 0.813,
+ 0.148
+ ],
+ "angle": 0,
+ "content": "AISG AI Singapore. 2025. SEA-LION-Pile-v2."
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.511,
+ 0.158,
+ 0.885,
+ 0.251
+ ],
+ "angle": 0,
+ "content": "Anton Alexandrov, Veselin Raychev, Mark Niklas Mueller, Ce Zhang, Martin Vechev, and Kristina Toutanova. 2024. Mitigating catastrophic forgetting in language transfer via model merging. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 17167-17186, Miami, Florida, USA. Association for Computational Linguistics."
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.511,
+ 0.26,
+ 0.804,
+ 0.274
+ ],
+ "angle": 0,
+ "content": "Arcee-AI. 2024. Llama-3.1-SuperNova-Lite."
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.511,
+ 0.284,
+ 0.885,
+ 0.39
+ ],
+ "angle": 0,
+ "content": "Adrien Barbaresi. 2021. Trafilatura: A Web Scraping Library and Command-Line Tool for Text Discovery and Extraction. 
In Proceedings of the Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: System Demonstrations, pages 122-131. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.399, + 0.885, + 0.426 + ], + "angle": 0, + "content": "BAAI Beijing Academy of Artificial Intelligence. 2024. Infinity instruct." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.436, + 0.885, + 0.632 + ], + "angle": 0, + "content": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.643, + 0.773, + 0.656 + ], + "angle": 0, + "content": "CommonCrawl. 2024. Commoncrawl." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.666, + 0.885, + 0.77 + ], + "angle": 0, + "content": "Ganqu Cui, Lifan Yuan, Ning Ding, Guanming Yao, Bingxiang He, Wei Zhu, Yuan Ni, Guotong Xie, Ruobing Xie, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2024. ULTRAFEEDBACK: boosting language models with scaled AI feedback. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.781, + 0.746, + 0.794 + ], + "angle": 0, + "content": "DAMO-NLP-SG. 2024. Seaexam." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.805, + 0.885, + 0.858 + ], + "angle": 0, + "content": "Longxu Dou, Qian Liu, Guangtao Zeng, Jia Guo, Jiahui Zhou, Wei Lu, and Min Lin. 2024. *Sailor: Open language models for south-east asia. CoRR*, abs/2404.03608." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.868, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang," + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.885, + 0.922 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "text", + "bbox": [ + 0.133, + 0.086, + 0.49, + 0.464 + ], + "angle": 0, + "content": "Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, Aurélien Rodriguez, Austen Gregerson, Ava Spataru, Baptiste Rozière, Bethany Biron, Binh Tang, Bobbie Chern, Charlotte Caucheteux, Chaya Nayak, Chloe Bi, Chris Marra, Chris McConnell, Christian Keller, Christophe Touret, Chunyang Wu, Corinne Wong, Cristian Canton Ferrer, Cyrus Nikolaidis, Damien Alonsius, Daniel Song, Danielle Pintz, Danny Livshits, David Esiobu, Dhruv Choudhary, Dhruv Mahajan, Diego Garcia-Olano, Diego Perino, Dieuwke Hupkes, Egor Lakomkin, Ehab AlBadawy, Elina Lobanova, Emily Dinan, Eric Michael Smith, Filip Radenovic, Frank Zhang, Gabriel Synnaeve, Gabrielle Lee, Georgia Lewis Anderson, Graeme Nail, Grégoire Mialon, Guan Pang, Guillem Cucurell, Hailey Nguyen, Hannah Korevaar, Hu Xu, Hugo Touvron, Iliyan Zarov, Imanol Arrieta Ibarra, Isabel M. Kloumann, Ishan Misra, Ivan Evtimov, Jade Copet, Jaewon Lee, Jan Geffert, Jana Vranes, Jason Park, Jay Mahadeokar, Jeet Shah, Jelmer van der Linde, Jennifer Billock, Jenny Hong, Jenya Lee, Jeremy Fu, Jianfeng Chi, Jianyu Huang, Jiawen Liu, Jie Wang, Jiecao Yu, Joanna Bitton, Joe Spisak, Jongsoo Park, Joseph Rocca, Joshua Johnstun, Joshua Saxe, Junteng Jia, Kalyan Vasuden Alwala, Kartikeya Upasani, Kate Plawiak, Ke Li, Kenneth Heafield, Kevin Stone, and et al. 2024. The llama 3 herd of models. CoRR abs/2407.21783." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.476, + 0.486, + 0.504 + ], + "angle": 0, + "content": "Wikipedia Foundation. 2024. Wikipedia enterprise. html dumps downloads." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.516, + 0.49, + 0.634 + ], + "angle": 0, + "content": "Charles Goddard, Shamane Siriwardhana, Malikeh Ehghaghi, Luke Meyers, Vladimir Karpukhin, Brian Benedict, Mark McQuade, and Jacob Solawetz. 2024. Arcee's mergekit: A toolkit for merging large language models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: EMNLP 2024 - Industry Track, Miami, Florida, USA, November 12-16, 2024, pages 477-485. Association for Computational Linguistics." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.646, + 0.488, + 0.737 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021. Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.75, + 0.488, + 0.816 + ], + "angle": 0, + "content": "Pin-Lun Hsu, Yun Dai, Vignesh Kothapalli, Qingquan Song, Shao Tang, Siyu Zhu, Steven Shimizu, Shivam Sahni, Haowen Ning, and Yanning Chen. 2024. Liger kernel: Efficient triton kernels for lIm training. arXiv preprint arXiv:2410.10989." 
+ }, + { + "type": "text", + "bbox": [ + 0.116, + 0.828, + 0.49, + 0.921 + ], + "angle": 0, + "content": "Shengding Hu, Yuge Tu, Xu Han, Chaoqun He, Ganqu Cui, Xiang Long, Zhi Zheng, Yewei Fang, Yuxiang Huang, Weilin Zhao, Xinrong Zhang, Zhen Leng Thai, Kai Zhang, Chongyi Wang, Yuan Yao, Chenyang Zhao, Jie Zhou, Jie Cai, Zhongwu Zhai, Ning Ding, Chao Jia, Guoyang Zeng, Dahai Li, Zhiyuan Liu, and Maosong Sun. 2024. Minicpm: Un" + }, + { + "type": "text", + "bbox": [ + 0.53, + 0.086, + 0.882, + 0.113 + ], + "angle": 0, + "content": "veiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.121, + 0.807, + 0.136 + ], + "angle": 0, + "content": "HuggingFace. 2024. Open llm leaderboard." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.144, + 0.885, + 0.223 + ], + "angle": 0, + "content": "Gabriel Ilharco, Marco Túlio Ribeiro, Mitchell Wortsman, Ludwig Schmidt, Hannaneh Hajishirzi, and Ali Farhadi. 2023. Editing models with task arithmetic. In *The Eleventh International Conference on Learning Representations*, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.231, + 0.885, + 0.323 + ], + "angle": 0, + "content": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2017. Bag of tricks for efficient text classification. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 427-431. Association for Computational Linguistics." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.332, + 0.885, + 0.41 + ], + "angle": 0, + "content": "Wei Qi Leong, Jian Gang Ngui, Yosephine Susanto, Hamsawardhini Rengarajan, Kengatharayer Sarveswaran, and William-Chandra Tjhi. 2023. BHASA: A holistic southeast asian linguistic and cultural evaluation suite for large language models. CoRR, abs/2309.06085." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.42, + 0.885, + 0.485 + ], + "angle": 0, + "content": "Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.494, + 0.885, + 0.886 + ], + "angle": 0, + "content": "Holy Lovenia, Rahmad Mahendra, Salsabil Maulana Akbar, Lester James V. Miranda, Jennifer Santoso, Elyanah Aco, Akhdan Fadhilah, Jonibek Mansurov, Joseph Marvin Imperial, Onno Kampman, Joel Ruben Antony Moniz, Muhammad Ravi Shulthan Habibi, Frederikus Hudi, Jann Montalan, Ryan Hadiwijaya, Joanito Agili Lopo, William Nixon, Borje Karlsson, James Jaya, Ryandito Diandaru, Yuze Gao, Patrick Amadeus Irawan, Bin Wang, Jan Christian Blaise Cruz, Chenxi Whitehouse, Ivan Halim Parmonangan, Maria Khelli, Wenyu Zhang, Lucky Susanto, Reynard Adha Ryanda, Sonny Lazuardi Hermawan, Dan John Velasco, Muhammad Dehan Al Koutsar, Willy Fitra Hendria, Yasmin Moslem, Noah Flynn, Muhammad Farid Adilazuarda, Haochen Li, Johannes Lee, R. Damanhuri, Shuo Sun, Muhammad Reza Qorib, Amirbek Djanibekov, Wei Qi Leong, Quyet V. Do, Niklas Muennighoff, Tanrada Pansuwan, Ilham Firdausi Putra, Yan Xu, Ngee Tai Chia, Ayu Purwarianti, Sebastian Ruder, William-Chandra Tjhi, Peerat Limkonchotiwat, Alham Fikri Aji, Sedrick Keh, Genta Indra Winata, Ruochen Zhang, Fajri Koto, Zheng Xin Yong, and Samuel Cahyawijaya. 2024. 
Seacrowd: A multilingual multimodal data hub and benchmark suite for southeast asian languages. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami FL, USA, November 12-16, 2024, pages 5155-5203. Association for Computational Linguistics." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.894, + 0.885, + 0.921 + ], + "angle": 0, + "content": "Anton Lozhkov, Raymond Li, Loubna Ben Allal, Federico Cassano, Joel Lamy-Poirier, Nouamane Tazi," + } + ], + [ + { + "type": "text", + "bbox": [ + 0.133, + 0.086, + 0.49, + 0.308 + ], + "angle": 0, + "content": "Ao Tang, Dmytro Pykhtar, Jiawei Liu, Yuxiang Wei, Tianyang Liu, Max Tian, Denis Kocetkov, Arthur Zucker, Younes Belkada, Zijian Wang, Qian Liu, Dmitry Abulkhanov, Indraneil Paul, Zhuang Li, Wen-Ding Li, Megan Risdal, Jia Li, Jian Zhu, Terry Yue Zhuo, Evgenii Zheltonozhskii, Nii Osae Osae Dade, Wenhao Yu, Lucas Krauß, Naman Jain, Yixuan Su, Xuanli He, Manan Dey, Edoardo Abati, Yekun Chai, Niklas Muennighoff, Xiangru Tang, Muhtasham Oblokulov, Christopher Akiki, Marc Marone, Cheng-hao Mou, Mayank Mishra, Alex Gu, Binyuan Hui, Tri Dao, Armel Zebaze, Olivier Dehaene, Nicolas Patry, Canwen Xu, Julian J. McAuley, Han Hu, Torsten Scholak, Sébastien Paquet, Jennifer Robinson, Carolyn Jane Anderson, Nicolas Chapados, and et al. 2024. Starcoder 2 and the stack v2: The next generation. CoRR, abs/2402.19173." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.317, + 0.49, + 0.358 + ], + "angle": 0, + "content": "Yu Meng, Mengzhou Xia, and Danqi Chen. 2024. Simpo: Simple preference optimization with a reference-free reward. CoRR, abs/2405.14734." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.366, + 0.49, + 0.498 + ], + "angle": 0, + "content": "Xuan-Phi Nguyen, Wenxuan Zhang, Xin Li, Mahani Aljunied, Zhiqiang Hu, Chenhui Shen, Yew Ken Chia, Xingxuan Li, Jianyu Wang, Qingyu Tan, Liying Cheng, Guanzheng Chen, Yue Deng, Sen Yang, Chaoqun Liu, Hang Zhang, and Lidong Bing. 2024. SeaLLMs - large language models for Southeast Asia. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pages 294-304, Bangkok, Thailand. Association for Computational Linguistics." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.507, + 0.489, + 0.533 + ], + "angle": 0, + "content": "OpenAI. 2023. GPT-4 technical report. CoRR, abs/2303.08774." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.542, + 0.489, + 0.608 + ], + "angle": 0, + "content": "Guilherme Penedo, Hynek Kydlicek, Loubna Ben Allal, Anton Lozhkov, Margaret Mitchell, Colin Raffel, Leandro von Werra, and Thomas Wolf. 2024. The fine web datasets: Decanting the web for the finest text data at scale. CoRR, abs/2406.17557." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.617, + 0.49, + 0.709 + ], + "angle": 0, + "content": "Ivan Provilkov, Dmitrii Emelianenko, and Elena Voita. 2020. Bpe-dropout: Simple and effective subword regularization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, pages 1882-1892. Association for Computational Linguistics." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.718, + 0.49, + 0.811 + ], + "angle": 0, + "content": "Jeff Rasley, Samyam Rajbhandari, Olatunjri Ruwase, and Yuxiong He. 2020. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. 
In KDD '20: The 26th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, Virtual Event, CA, USA, August 23-27, 2020, pages 3505-3506. ACM." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.819, + 0.49, + 0.884 + ], + "angle": 0, + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2023. GPQA: A graduate-level google-proof q&a benchmark. CoRR, abs/2311.12022." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.894, + 0.489, + 0.922 + ], + "angle": 0, + "content": "Morgane Rivière, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.086, + 0.885, + 0.504 + ], + "angle": 0, + "content": "Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, Johan Ferret, Peter Liu, Pouya Tafti, Abe Friesen, Michelle Casbon, Sabela Ramos, Ravin Kumar, Charline Le Lan, Sammy Jerome, Anton Tsitsulin, Nino Vieillard, Piotr Stanczyk, Sertan Girgin, Nikola Momchev, Matt Hoffman, Shantanu Thakoor, Jean-Bastien Grill, Behnam Neyshabur, Olivier Bachem, Alanna Walton, Aliaksei Severyn, Alicia Parrish, Aliya Ahmad, Allen Hutchison, Alvin Abdagic, Amanda Carl, Amy Shen, Andy Brock, Andy Coenen, Anthony Laforge, Antonia Paterson, Ben Bastian, Bilal Piot, Bo Wu, Brandon Royal, Charlie Chen, Chintu Kumar, Chris Perry, Chris Welty, Christopher A. Choquette-Choo, Danila Sinopalnikov, David Weinberger, Dimple Vijaykumar, Dominika Rogozinska, Dustin Herbison, Elisa Bandy, Emma Wang, Eric Noland, Erica Moreira, Evan Senter, Evgenii Eltsyshev, Francesco Visin, Gabriel Rasskin, Gary Wei, Glenn Cameron, Gus Martins, Hadi Hashemi, Hanna Klimczak-Plucinska, Harleen Batra, Harsh Dhand, Ivan Nardini, Jacinda Mein, Jack Zhou, James Svensson, Jeff Stanway, Jetha Chan, Jin Peng Zhou, Joana Carrasqueira, Joana Iljazi, Jocelyn Becker, Joe Fernandez, Joost van Amersfoort, Josh Gordon, Josh Lipschultz, Josh Newlan, Ju-yeong Ji, Kareem Mohamed, Kartikeya Badola, Kat Black, Katie Millican, Keelin McDonell, Kelvin Nguyen, Kiranbir Sodhia, Kish Greene, Lars Lowe Sjösund, Lauren Usui, Laurent Sifre, Lena Heuermann, Leticia Lago, and Lilly McNealus. 2024. Gemma 2: Improving open language models at a practical size. CoRR, abs/2408.00118." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.513, + 0.883, + 0.54 + ], + "angle": 0, + "content": "Sailor2 Team. 2024. Sailor2: Sailing in south-east asia with inclusive multilingual llm." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.548, + 0.884, + 0.797 + ], + "angle": 0, + "content": "Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, Matthias Galle, Jonathan Tow, Alexander M. Rush, Stella Biderman, Albert Webson, Pawan Sasanka Ammanamanchi, Thomas Wang, Benoit Sagot, Niklas Muennighoff, Albert Villanova del Moral, Olatunj Ruwase, Rachel Bawden, Stas Bekman, Angelina McMillan-Major, Iz Beltagy, Huu Nguyen, Lucile Saulnier, Samson Tan, Pedro Ortiz Suarez, Victor Sanh, Hugo Laurençon, Yacine Jernite, Julien Launay, Margaret Mitchell, Colin Raffel, Aaron Gokaslan, Adi Simhi, Aitor Soroa, Alham Fikri Aji, Amit Alfassy, Anna Rogers, Ariel Kreisberg Nitzav, Canwen Xu, Chenghao Mou, Chris Emezue, Christopher Klamm, Colin Leong, Daniel van Strien, David Ifeoluwa Adelani, and et al. 2022. BLOOM: A 176b-parameter open-access multilingual language model. CoRR, abs/2211.05100." 
+ }, + { + "type": "text", + "bbox": [ + 0.51, + 0.806, + 0.883, + 0.833 + ], + "angle": 0, + "content": "AI Singapore SEA-LION Team. 2024. Llama3 8b cpt sea-lionv2.1 instruct." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.842, + 0.884, + 0.922 + ], + "angle": 0, + "content": "Luca Soldaini, Rodney Kinney, Akshita Bhagia, Dustin Schwenk, David Atkinson, Russell Authur, Ben Bogin, Khyathi Chandu, Jennifer Dumas, Yanai Elazar, Valentin Hofmann, Ananya Jha, Sachin Kumar, Li Lucy, Xinxi Lyu, Nathan Lambert, Ian Magnusson, Jacob Morrison, Niklas Muennighoff," + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.086, + 0.49, + 0.231 + ], + "angle": 0, + "content": "Aakanksha Naik, Crystal Nam, Matthew Peters, Abhilasha Ravichander, Kyle Richardson, Zejiang Shen, Emma Strubell, Nishant Subramani, Oyvind Tafjord, Evan Walsh, Luke Zettlemoyer, Noah Smith, Hannaneh Hajishirzi, Iz Beltagy, Dirk Groeneveld, Jesse Dodge, and Kyle Lo. 2024. Dolma: an open corpus of three trillion tokens for language model pretraining research. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15725-15788, Bangkok, Thailand. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.24, + 0.488, + 0.318 + ], + "angle": 0, + "content": "Zayne Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. 2024. Musr: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.328, + 0.488, + 0.419 + ], + "angle": 0, + "content": "Yosephine Susanto, Adithya Venkatadri Hulagadri, Jann Railey Montalan, Jian Gang Ngui, Xian Bin Yong, Weiqi Leong, Hamsawardhini Rengarajan, Peerat Limkonchotiwat, Yifan Mai, and William Chandra Tjhi. 2025. Sea-helm: Southeast asian holistic evaluation of language models. Preprint, arXiv:2502.14301." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.429, + 0.488, + 0.547 + ], + "angle": 0, + "content": "Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V. Le, Ed H. Chi, Denny Zhou, and Jason Wei. 2023. Challenging big-bench tasks and whether chain-of-thought can solve them. In *Findings of the Association for Computational Linguistics: ACL* 2023, Toronto, Canada, July 9-14, 2023, pages 13003-13051. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.557, + 0.488, + 0.595 + ], + "angle": 0, + "content": "Sho Takase, Ryokan Ri, Shun Kiyono, and Takuya Kato. 2024. Large vocabulary size improves large language models. CoRR, abs/2406.16508." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.606, + 0.486, + 0.633 + ], + "angle": 0, + "content": "Sailor Team. 2024. Sailor2: Sailing in south-east asia with inclusive multilingual llms." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.642, + 0.488, + 0.669 + ], + "angle": 0, + "content": "The Mosaic ML Team. 2021. composer. https://github.com/mosaicml/composer/." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.678, + 0.488, + 0.705 + ], + "angle": 0, + "content": "The Mosaic ML Team. 2022. Llm foundry. https://github.com/mosaicml/llm-foundry." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.714, + 0.488, + 0.78 + ], + "angle": 0, + "content": "Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. 2024. Openmathinstruct-2: Accelerating AI for math with massive open-source instruction data. CoRR, abs/2410.01560." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.789, + 0.488, + 0.921 + ], + "angle": 0, + "content": "Ahmet Üstün, Viraat Aryabumi, Zheng Xin Yong, WeiYin Ko, Daniel D'souza, Gbemileke Onilude, Neel Bhandari, Shivalika Singh, Hui-Lee Ooi, Amr Kayid, Freddie Vargus, Phil Blunsom, Shayne Longpre, Niklas Muennighoff, Marzieh Fadaee, Julia Kreutzer, and Sara Hooker. 2024. Aya model: An instruction finetuned open-access multilingual language model. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August" + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.53, + 0.086, + 0.882, + 0.113 + ], + "angle": 0, + "content": "11-16, 2024, pages 15894-15939. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.123, + 0.884, + 0.255 + ], + "angle": 0, + "content": "Bin Wang, Zhengyuan Liu, Xin Huang, Fangkai Jiao, Yang Ding, AiTi Aw, and Nancy Chen. 2024a. Sealeval for multilingual foundation models: From crosslingual alignment to cultural reasoning. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pages 370-390. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.265, + 0.884, + 0.356 + ], + "angle": 0, + "content": "Ke Wang, Nikolaos Dimitriadis, Guillermo Ortiz-Jiménez, François Fleuret, and Pascal Frossard. 2024b. Localizing task information for improved model merging and compression. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.367, + 0.883, + 0.459 + ], + "angle": 0, + "content": "Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. 2024c. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. CoRR, abs/2406.01574." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.469, + 0.884, + 0.575 + ], + "angle": 0, + "content": "Chris Wendler, Veniamin Veselovsky, Giovanni Monea, and Robert West. 2024. Do llamas work in english? on the latent language of multilingual transformers. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pages 15366-15394. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.585, + 0.884, + 0.715 + ], + "angle": 0, + "content": "Mitchell Wortsman, Peter J. Liu, Lechao Xiao, Katie E. Everett, Alexander A. Alemi, Ben Adlam, John D. Co-Reyes, Izzeddin Gur, Abhishek Kumar, Roman Novak, Jeffrey Pennington, Jascha Sohl-Dickstein, Kelvin Xu, Jaehoon Lee, Justin Gilmer, and Simon Kornblith. 2024. 
Small-scale proxies for large-scale transformer training instabilities. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. Open-Review.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.726, + 0.883, + 0.792 + ], + "angle": 0, + "content": "Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuchen Lin. 2024. Magpie: Alignment data synthesis from scratch by prompting aligned llms with nothing. CoRR, abs/2406.08464." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.803, + 0.884, + 0.921 + ], + "angle": 0, + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jianxin Yang, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize" + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.884, + 0.921 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.086, + 0.49, + 0.191 + ], + "angle": 0, + "content": "Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Xuejing Liu, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, Zhifang Guo, and Zhihao Fan. 2024a. Qwen2 technical report. CoRR, abs/2407.10671." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.202, + 0.489, + 0.254 + ], + "angle": 0, + "content": "Ziyi Yang, Fanqi Wan, Longguang Zhong, Tianyuan Shi, and Xiaojun Quan. 2024b. Weighted-reward preference optimization for implicit model fusion. CoRR, abs/2412.03187." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.265, + 0.489, + 0.357 + ], + "angle": 0, + "content": "Wei Jie Yeo, Teddy Ferdinan, Przemyslaw Kazienko, Ranjan Satapathy, and Erik Cambria. 2024. Self-training large language models through knowledge detection. In *Findings of the Association for Computational Linguistics: EMNLP* 2024, Miami, Florida, USA, November 12-16, 2024, pages 15033-15045. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.368, + 0.489, + 0.447 + ], + "angle": 0, + "content": "Le Yu, Bowen Yu, Haiyang Yu, Fei Huang, and Yongbin Li. 2024. Language models are super mario: Absorbing abilities from homologous models as a free lunch. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.458, + 0.489, + 0.536 + ], + "angle": 0, + "content": "Wenxuan Zhang, Hou Pong Chan, Yiran Zhao, Mahani Aljunied, Jianyu Wang, Chaoqun Liu, Yue Deng, Zhiqiang Hu, Weiwen Xu, Yew Ken Chia, Xin Li, and Lidong Bing. 2024a. Seallms 3: Open foundation and chat multilingual large language models for southeast asian languages. CoRR, abs/2407.19672." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.547, + 0.489, + 0.626 + ], + "angle": 0, + "content": "Xulang Zhang, Rui Mao, and Erik Cambria. 2024b. Multilingual emotion recognition: Discovering the variations of lexical semantics between languages. 
In International Joint Conference on Neural Networks, IJCNN 2024, Yokohama, Japan, June 30 - July 5, 2024, pages 1-9. IEEE." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.637, + 0.489, + 0.741 + ], + "angle": 0, + "content": "Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, Alban Desmaison, Can Balioglu, Pritam Damania, Bernard Nguyen, Geeta Chauhan, Yuchen Hao, Ajit Mathews, and Shen Li. 2023. Pytorch FSDP: experiences on scaling fully sharded data parallel. Proc. VLDB Endow., 16(12):3848-3860." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.752, + 0.489, + 0.856 + ], + "angle": 0, + "content": "Wenzhen Zheng, Wenbo Pan, Xu Xu, Libo Qin, Li Yue, and Ming Zhou. 2024a. Breaking language barriers: Cross-lingual continual pre-training at scale. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pages 7725-7738. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.868, + 0.489, + 0.921 + ], + "angle": 0, + "content": "Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024b. Llamafactory: Unified efficient fine-tuning of \\(100+\\) language models. In Proceedings of the" + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.529, + 0.086, + 0.884, + 0.139 + ], + "angle": 0, + "content": "62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.149, + 0.883, + 0.214 + ], + "angle": 0, + "content": "Chengzhi Zhong, Fei Cheng, Qianying Liu, Junfeng Jiang, Zhen Wan, Chenhui Chu, Yugo Murawaki, and Sadao Kurohashi. 2024. Beyond english-centric llms: What language do multilingual language models think in? CoRR, abs/2408.10811." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.224, + 0.883, + 0.278 + ], + "angle": 0, + "content": "Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. 2023. Instruction-following evaluation for large language models. CoRR, abs/2311.07911." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.884, + 0.278 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.239, + 0.101 + ], + "angle": 0, + "content": "A Appendix" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.11, + 0.441, + 0.126 + ], + "angle": 0, + "content": "A.1 Continued pre-training (CPT) data" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.131, + 0.768, + 0.146 + ], + "angle": 0, + "content": "Existing data: We utilize existing datasets as shown in Table 6 (HuggingFace Datasets)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.147, + 0.882, + 0.179 + ], + "angle": 0, + "content": "Other data: As shown in Table 6 (the other data section), the listed datasets contain data from a diverse range of domains, including news, books, articles, poems, etc." + }, + { + "type": "table", + "bbox": [ + 0.296, + 0.19, + 0.703, + 0.33 + ], + "angle": 0, + "content": "
Continued Pre-training Data
Source (HuggingFace Datasets)LanguagesSize (Billions of Tokens)
bigcode/the-stack-v2-dedupCODE40
allenai/dolmaEN37.5
HuggingFaceFW/fineweb-eduEN7.5
aisingapore/SEA-PILE-v1SEA47.58
aisingapore/SEA-PILE-v2ID7
Source (Others)LanguagesSize (Billions of Tokens)
VinBigDataVI16
WangChanBERTaTH8.5
Others - ENEN5
Others - SEASEA30.92
" + }, + { + "type": "table_caption", + "bbox": [ + 0.293, + 0.34, + 0.702, + 0.355 + ], + "angle": 0, + "content": "Table 6: List of datasets for the continued pre-training stage." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.381, + 0.296, + 0.397 + ], + "angle": 0, + "content": "A.2 Stage 1 IFT data" + }, + { + "type": "table_caption", + "bbox": [ + 0.44, + 0.414, + 0.558, + 0.424 + ], + "angle": 0, + "content": "Stage 1 IFT Datasets" + }, + { + "type": "table", + "bbox": [ + 0.336, + 0.424, + 0.663, + 0.462 + ], + "angle": 0, + "content": "
Source (HuggingFace Datasets)LanguagesSize
BAAI/Infinity-InstructEN7,449,106
nvidia/OpenMathInstruct-2EN2,000,000
" + }, + { + "type": "table_caption", + "bbox": [ + 0.114, + 0.471, + 0.884, + 0.499 + ], + "angle": 0, + "content": "Table 7: List of datasets for Stage-1-IFT. For BAAI/Infinity-Instruct dataset, any conversation that originally ended with a user turn has had that last turn removed." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.527, + 0.296, + 0.543 + ], + "angle": 0, + "content": "A.3 Stage 2 IFT data" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.548, + 0.768, + 0.563 + ], + "angle": 0, + "content": "Existing data: We utilize existing datasets as shown in Table 9 (HuggingFace Datasets)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.564, + 0.882, + 0.593 + ], + "angle": 0, + "content": "Synthetic data: As shown in Table 9 (the generated part), we describe how to formulate synthetic data as follows" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.596, + 0.881, + 0.627 + ], + "angle": 0, + "content": "- qwen_gemma_synthetic datasets are generated first in English with Qwen 32B, utilizing an approach similar to Magpie. Instructions are then translated into the target language with Gemma 2 27B." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.629, + 0.881, + 0.674 + ], + "angle": 0, + "content": "- Llama_gemma_synthetic datasets are generated first in English with Llama 3.1 70B, utilizing an approach similar to Magpie (Xu et al., 2024). Instructions are then translated into the target language with Gemma 2 27B." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.677, + 0.882, + 0.692 + ], + "angle": 0, + "content": "- gemma_synthetic datasets are generated directly with Gemma 2 27B using Magpie (Xu et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.693, + 0.881, + 0.723 + ], + "angle": 0, + "content": "- sea_multilingual_systemchat is a synthetic dataset translated with Gemma 2 27B from the English systemchat dataset." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.726, + 0.85, + 0.74 + ], + "angle": 0, + "content": "- rewritten_oasst is a dataset rewritten with Gemma 2 27B based on the English OASST dataset." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.741, + 0.882, + 0.756 + ], + "angle": 0, + "content": "- rewritten_helpsteer is a dataset rewritten with Gemma 2 27B based on the English Helpsteer dataset." + }, + { + "type": "list", + "bbox": [ + 0.135, + 0.596, + 0.882, + 0.756 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.768, + 0.502, + 0.783 + ], + "angle": 0, + "content": "A.4 Helpfulness and preference alignment data" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.789, + 0.885, + 0.916 + ], + "angle": 0, + "content": "As shown in Table 8, we use the princeton-nlp/gemma2-ultrafeedback-armorm as the source of the alignment data. We then further re-scored with the reward model, nvidia/Llama-3.1-Nemotron-70B-Reward to create the SEA version. In particular, generated-gemma2-27b-seapref-nemotron-70b takes prompts from seald, wangchan_thainstruct, and additional hand-written Southeast Asian cultural prompts collected from native speakers and then generates responses (with a varying temperature) from them with Gemma 2 27B. The responses are then scored with nvidia/Llama-3.1-Nemotron-70B-Reward, with the top-scoring response selected as chosen and vice versa, similar to princeton-nlp/gemma2-ultrafeedback-armorm." 
+ } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.456, + 0.132, + 0.546, + 0.142 + ], + "angle": 0, + "content": "Preference Data" + }, + { + "type": "table", + "bbox": [ + 0.312, + 0.143, + 0.692, + 0.192 + ], + "angle": 0, + "content": "
Source (HuggingFace Datasets)LanguagesSize
princeton-nlp/gemma2-ultrafeedback-armormEN61,510
Source (Generated)LanguagesSize
generated-gemma2-27b-seapref-nemotron-70bSEA5,511
" + }, + { + "type": "table_caption", + "bbox": [ + 0.28, + 0.202, + 0.716, + 0.216 + ], + "angle": 0, + "content": "Table 8: List of preference datasets used for the alignment stage." + }, + { + "type": "table_caption", + "bbox": [ + 0.44, + 0.323, + 0.558, + 0.334 + ], + "angle": 0, + "content": "Stage 2 IFT Datasets" + }, + { + "type": "table", + "bbox": [ + 0.202, + 0.334, + 0.797, + 0.844 + ], + "angle": 0, + "content": "
Source (HuggingFace Datasets)LanguagesSize
BAAI/Infinity-Instruct^*EN1,456,927
HuggingFaceTB/smoltalkEN409,537
allenai/tulu-3-sft-personas-mathEN149,960
parinzee/seed-free-synthetic-instruct-thai-v1TH118,898
HuggingFaceTB/smoltalkEN96,356
HuggingFaceTB/smoltalkEN83,144
arcee-ai/EvolKit-75KEN74,174
AI-MO/NuminaMath-TIREN72,441
Post-training-Data-Flywheel/AutoIF-instruct-61kEN61,492
argilla/ifeval-like-dataEN56,339
HuggingFaceTB/smoltalkEN53,342
ai2-adapt-dev/tulu_v3.9_wildjailbreak_decontaminated_50kEN50,000
ai2-adapt-dev/tulu_v3.9_synthetic_finalresp_wildguardmixtrain_decontaminated_50kEN50,000
allenai/tulu-3-sft-personas-math-gradeEN49,980
allenai/tulu-3-sft-personas-codeEN34,999
HuggingFaceTB/smoltalkEN34,424
allenai/tulu-3-sft-personas-instruction-followingEN29,980
airesearch/WangchanThaiInstructTH25,014
allenai/tulu-3-sft-personas-algebraEN20,000
arcee-ai/EvolKit-20k-viVI15,378
allenai/coconotEN10,983
ai2-adapt-dev/tulu_v3.9_scirff_10kEN10,000
Source (Generated)LanguagesSize
qwen_gemma_synthetic_tamilTA480,000
qwen_gemma_synthetic_thaiTH480,000
qwen_gemma_synthetic_indonesianID465,019
qwen_gemma_synthetic_vietnameseVI465,019
gemma_synthetic_indonesianID458,149
gemma_synthetic_filipinoTL455,093
gemma_synthetic_vietVI291,576
gemma_synthetic_tamilTA276,314
gemma_synthetic_thaiTH186,339
gemma_synthetic_javaneseJV110,000
gemma_synthetic_sudaneseSU110,000
llama_gemma_synthetic_thaiTH88,920
llama_gemma_synthetic_tamilTA88,920
llama_gemma_synthetic_vietnameseVI88,920
llama_gemma_synthetic_javaneseJV88,920
llama_gemma_synthetic_indonesianID88,920
llama_gemma_synthetic_filipinoTL80,000
enrich_27kSEA27,463
seaMultilingual_systemchatSEA1,903
rewritten_oasstSEA841
rewritten_helpsteerSEA838
" + }, + { + "type": "table_caption", + "bbox": [ + 0.36, + 0.854, + 0.637, + 0.868 + ], + "angle": 0, + "content": "Table 9: List of datasets for Stage-2-IFT." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_origin.pdf b/data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..20d6a57cd2f16c6e0d5a9a005a1b862648a14970 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a9ab244e0d795e5a10e2b89a20072a80f0fabbf9f64431a181dbc153a00d533 +size 2395052 diff --git a/data/2025/2504_05xxx/2504.05747/full.md b/data/2025/2504_05xxx/2504.05747/full.md new file mode 100644 index 0000000000000000000000000000000000000000..6292a968ab0d78f8c1e9a38b672444f29115ec9d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/full.md @@ -0,0 +1,371 @@ +![](images/9f6b9f3761ad7213c0ce54d33552c028e28aae4e28023f2ef7dc3583c49097e7.jpg) + +# SEA-LION: Southeast Asian Languages in One Network + +Raymond Ng\*, Thanh Ngan Nguyen\*, Yuli Huang\*, Ngee Chia Tai\*, Wai Yi Leong\*, Wei Qi Leong\*, Xianbin Yong\*, Jian Gang Ngui\*, Yosephine Susanto\*, Nicholas Cheng\*, Hamsawardhini Rengarajan\*, Peerat Limkonchotiwat\*, Adithya Venkatadri Hulagadri\*, Kok Wai Teng\*, Yeo Yeow Tong\*, Bryan Siow\*, Wei Yi Teo\*, Wayne Lau\*, Choon Meng Tan\*, Brandon Ong\*, Zhi Hao Ong\*, Jann Railey Montalan\*, Adwin Chan\*, Sajeban Antonyrex\*, Ren Lee\*, Esther Choa\*, David Ong Tat-Wee\*, Bing Jie Darius Liu\*, William Chandra Tjhi\*, Erik Cambria\*, Leslie Teo\* AI Singapore, National University of Singapore \*Nanyang Technological University https://sea-lion.ai + +# Abstract + +Recently, Large Language Models (LLMs) have dominated much of the artificial intelligence scene with their ability to process and generate natural languages. However, the majority of LLM research and development remains English-centric, leaving low-resource languages such as those in the Southeast Asian (SEA) region underrepresented. To address this representation gap, we introduce Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, two cutting-edge multilingual LLMs designed for SEA languages. The SEA-LION family of LLMs supports 11 SEA languages, namely English, Chinese, Indonesian, Vietnamese, Malay, Thai, Burmese, Lao, Filipino, Tamil, and Khmer. Our work leverages large-scale multilingual continued pre-training with a comprehensive post-training regime involving multiple stages of instruction fine-tuning, alignment, and model merging. Evaluation results on multilingual benchmarks show that our models achieve state-of-the-art performance across LLMs supporting SEA languages. We open-source the models to benefit the wider SEA community. + +# 1 Introduction + +Large language models (LLMs) have significantly transformed the field of natural language processing, achieving remarkable performance in text generation, summarization and sentiment analysis (Brown et al., 2020; OpenAI, 2023; Dubey et al., 2024; Rivière et al., 2024; Zhang et al., 2024b; Yeo et al., 2024). Despite their impressive capabilities, most LLMs remain heavily English-centric (Wendler et al., 2024; Zhong et al., 2024). Unfortunately, this situation has led LLMs in regions with many under-represented languages such + +as Southeast Asia (SEA) to suffer. 
Languages with lower resources, such as Filipino, Lao, Burmese, and Khmer in the SEA region, are not supported by many open-source English-centric LLMs. This underscores the need to bridge the resource and representation gap between English and SEA languages.

Recently, there have been many attempts to create multilingual LLMs in an open-source manner, e.g., BLOOM (Scao et al., 2022), a project aimed at increasing the multilingual presence in open-source LLMs by supporting 46 languages. Popular LLM families such as Llama (Dubey et al., 2024), Gemma (Rivière et al., 2024), and Qwen (Yang et al., 2024a) have also introduced multilingual LLMs in their latest iterations. During our evaluations, we found that the performance of these models is acceptable in the general case, i.e., on evaluation benchmarks formulated from English datasets. However, we observe that their performance degrades on SEA-specific benchmarks. Moreover, researchers have also introduced LLMs such as SeaLLMs (Nguyen et al., 2024; Zhang et al., 2024a) and Sailor (Dou et al., 2024) to specifically address the LLM gap in SEA languages. However, the performance of these models is less than ideal for languages such as Thai or Tamil $^{2}$ (SCB 10X et al., 2024; AI Products Team, 2024).

In this paper, we address these issues by proposing a robust, open-source Southeast Asian model family with data transparency for reproducibility, namely SEA-LION: a family of LLMs continued pre-trained (CPT) and fine-tuned with a focus on SEA languages, comprising Llama-SEA-LION-8B-IT, built on Llama-3.1-8B-Instruct, and Gemma-SEA-LION-9B-IT, built on Gemma2-9B. To tackle the performance problem, we utilize 200 billion tokens of English, code, and SEA-language text for CPT, as well as 16.8 million instruction-and-answer pairs in English and SEA languages for the post-training steps, achieving a significant improvement in SEA languages. To allow our models to be used by everyone without restrictions, we release them under the fully open MIT license. We benchmark our models on SEA-HELM (Susanto et al., 2025) and the Open LLM Leaderboard$^{3}$ against other LLMs of similar sizes for Southeast Asia, such as Sailor 2 (Sailor2 Team, 2024) and SeaLLMs 3 (Zhang et al., 2024a), where our models achieve state-of-the-art performance.

We summarize the contributions of our paper as follows.

- We release two LLMs, Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, that are meticulously trained to accurately represent the unique linguistic diversity of SEA languages.
- We provide in-depth insights into our end-to-end training workflow to benefit the community developing multilingual LLMs.
- We present a reproducible dataset development process, covering data sourcing and model training. We release our training artifacts, including the training dataset, training scripts, training checkpoints, and the fine-tuned models Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, to provide strong baselines, promote reproducibility, and enable future research on applications that require SEA-specific knowledge$^{4}$ (a minimal usage sketch follows this list).
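For readers who want to try the released checkpoints, the following is a minimal usage sketch with Hugging Face Transformers. The repository identifier below is an assumption based on the AI Singapore organization name on Hugging Face; check https://sea-lion.ai for the exact released names.

```python
# Minimal usage sketch for a SEA-LION instruct model (repo id is assumed, not confirmed here).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "aisingapore/Gemma-SEA-LION-9B-IT"  # assumed identifier; verify before use
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

# An Indonesian prompt, one of the 11 supported SEA languages.
messages = [{"role": "user", "content": "Apa ibu kota Indonesia?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```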
# 2 Continued pre-training (CPT)

# 2.1 Pre-training data

The CPT data consists of a curated set of English, multilingual, and code corpora from several open-source repositories such as Dolma (Soldaini et al., 2024), FineWeb (Penedo et al., 2024), the-stack-v2 (Lozhkov et al., 2024), SEA-LION-Pile (AI Singapore, 2023), and SEA-LION-Pile-v2 (AI Singapore, 2025), as well as documents from CommonCrawl (CommonCrawl, 2024) and from the public domain, such as Wikipedia (Foundation, 2024). For SEA-LION-Pile-v2, we filter CommonCrawl WARC data for documents in SEA languages (i.e., Burmese, Simplified Chinese, Indonesian, Khmer, Lao, Malay, Filipino, Tamil, Thai, and Vietnamese) using the pre-trained fastText language classifier (Joulin et al., 2017).

A document is retained if the language code reported in its metadata matches one of the aforementioned SEA languages. Additionally, we further clean the data with Trafilatura (Barbaresi, 2021). To determine the optimal dataset ratio between SEA languages, code, and English for the CPT process, we conduct a series of small-scale CPT experiments, each with a training budget of 10 billion tokens and varying proportions of English, code, and SEA-language data. We settled on an optimal data mix of $55\%$ SEA-language, $25\%$ English, and $20\%$ code tokens for a budget of 200 billion tokens. For a detailed breakdown of the token counts by language, please refer to Table 6.

# 2.2 CPT process

Model selection. As base models for CPT, we choose Llama-3.1-8B-Instruct (Dubey et al., 2024) and Gemma-2-9B (Rivière et al., 2024).

Training setup. Following previous work (Dou et al., 2024), we use BPE-Dropout (Provilkov et al., 2020) to increase the performance and robustness of training. We use a Warmup-Stable-Decay (WSD) scheduler (Hu et al., 2024) with warm-up and cooldown phases each representing $10\%$ of the entire training budget. We use the AdamW (Loshchilov and Hutter, 2019) optimizer with the maximum learning rate (LR) set to $1e^{-5}$ and a final LR of $1e^{-7}$ after cooldown. Following Wortsman et al. (2024), we set epsilon to $1e^{-15}$. We use Composer (The Mosaic ML Team, 2021) and LLM Foundry (The Mosaic ML Team, 2022) for distributed training with Fully Sharded Data Parallel (Zhao et al., 2023) on a cluster of eight p5.48xlarge nodes from Amazon Web Services (AWS). The total training duration was approximately 6 days for the Llama 3.1 model and 10 days for the Gemma 2 model. In this paper, we refer to the post-CPT Llama 3.1 and Gemma 2 models as Llama-SEA-LION-8B and Gemma-SEA-LION-9B, respectively.

# 3 Post-training

# 3.1 Post-training data

The post-training data consists of three subsets: Stage 1 IFT data, Stage 2 IFT data, and a preference dataset for alignment. We describe the training data for each step as follows.

Stage 1 IFT. In this step, we employ Infinity-Instruct [Foundation and Chat] (Beijing Academy of Artificial Intelligence, 2024) and OpenMathInstruct-2 (Toshniwal et al., 2024) to improve the mathematical, reasoning, and coding skills of the instruction model. The full details of the training data are shown in Table 7 in the Appendix.

Stage 2 IFT. In this step, we fine-tune the previous instruction model on generalized, large-scale instructions. 
# 3 Post-training

# 3.1 Post-training data

The post-training data consists of three subsets, used for Stage 1 IFT, Stage 2 IFT, and the preference dataset for alignment, respectively. We describe the training data for each step as follows.

Stage 1 IFT. In this step, we employ Infinity-Instruct [Foundation and Chat] (Beijing Academy of Artificial Intelligence, 2024) and OpenMathInstruct-2 (Toshniwal et al., 2024) to improve the mathematical, reasoning, and coding skills of the instruction model. The full details of the training data are shown in Table 7.

Stage 2 IFT. In this step, we apply generalized large-scale instruction data to the previous instruction model. In particular, we employ 22 existing datasets (written in English, Thai, and Vietnamese) and formulate 22 new synthetic datasets using various models and techniques to create SEA instruction datasets (see Appendix A.3 for the full data generation details). As shown in Table 9, we use a total of 7,298,828 instruction samples covering 11 languages.

Helpfulness and preference alignment. We also conduct alignment learning on top of the instruction model using a feedback dataset called UltraFeedback (Cui et al., 2024). In addition, we synthesized a SEA version of UltraFeedback, generating responses with Gemma 2 and scoring them with the Nemotron-70B reward model; see Appendix A.4 for the full details.

![](images/9138939f8b29aa04aaea835f7a6d240c1c7202541e014a22e29a7fadfe0d65fb.jpg)
Figure 1: Training process of Llama-SEA-LION-8B-IT (Section 3.2.1). The post-training process consists of two stages of instruction fine-tuning, an alignment stage, and multiple merge stages. Dotted lines denote a merge stage and solid lines denote an alignment stage.

# 3.2 Post-training process

We use LlamaFactory (Zheng et al., 2024b) with DeepSpeed (Rasley et al., 2020) for all instruction fine-tuning (IFT) and alignment steps. All IFT stages are performed using full-model fine-tuning, starting from the models produced in the previous step (Section 2.2) or from existing models. We use MergeKit (Goddard et al., 2024) with the weight and density parameters set to 1 for all merge steps. Models are selected for merging empirically, based on the openness of their licenses, their suitability for merging, and their performance.

# 3.2.1 Llama-SEA-LION-8B-IT

Stage 1 IFT As shown in Figure 1, we started the post-training phase with IFT of Llama-SEA-LION-8B on the Infinity-Instruct (Foundation) (Beijing Academy of Artificial Intelligence, 2024) and OpenMathInstruct-2 (Toshniwal et al., 2024) datasets. Combined, the two datasets contain approximately 9.5 million instruction pairs, primarily in English and centered around reasoning, math, and code. We refer to the model at this stage as Stage-1-Llama.

Stage 2 IFT We performed a second round of IFT using the SEA-Instruct dataset, which consists of approximately 7.3 million instruction pairs, of which 5 million are generated in SEA languages using the Gemma-2-27B-Instruct (Rivière et al., 2024) and Qwen2.5-32B-Instruct (Yang et al., 2024a) models. The remainder are English instruction pairs from the Infinity-Instruct (Chat) (Beijing Academy of Artificial Intelligence, 2024) dataset. We refer to the model at this stage as Stage-2-Llama.

First merge After finishing the IFT stages, we performed the first of a series of merges by merging Stage-1-Llama and Stage-2-Llama into Llama-SEA-LION-8B using the DARE-TIES (Yu et al., 2024; Ilharco et al., 2023) method. We refer to the model at this stage as Merge-1-Llama.

Second merge To mitigate catastrophic forgetting caused by the fine-tuning process (Alexandrov et al., 2024), we performed a second round of merging, combining top-performing instruction-tuned models that share the Llama 3.1 lineage. We merge the original Llama-3.1-8B-Instruct, Llama3-8B-SEA-LION-v2.1-Instruct (SEA-LION Team, 2024), and SuperNova-Lite (Arcee-AI, 2024) into Merge-1-Llama using the Consensus TA (Wang et al., 2024b; Ilharco et al., 2023) merge method. We refer to the model at this stage as Merge-2-Llama.
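As an illustration of the merge configuration described above, the sketch below writes a MergeKit config for a DARE-TIES merge of the two IFT checkpoints into the CPT base, with weight and density set to 1 as in the text. The checkpoint paths are placeholders, and the parameter layout follows MergeKit's published YAML schema at the time of writing; treat this as a sketch, not the authors' actual configuration.

```python
import subprocess
import yaml

# Hypothetical local checkpoint paths; substitute your own.
config = {
    "merge_method": "dare_ties",
    "base_model": "checkpoints/Llama-SEA-LION-8B",   # the CPT model
    "models": [
        {"model": "checkpoints/Stage-1-Llama",
         "parameters": {"weight": 1.0, "density": 1.0}},
        {"model": "checkpoints/Stage-2-Llama",
         "parameters": {"weight": 1.0, "density": 1.0}},
    ],
    "dtype": "bfloat16",
}

with open("merge-1-llama.yml", "w") as f:
    yaml.safe_dump(config, f)

# MergeKit ships a CLI entry point that consumes this config.
subprocess.run(["mergekit-yaml", "merge-1-llama.yml", "out/Merge-1-Llama"],
               check=True)
```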
Helpfulness and preference alignment We performed one round of alignment on Merge-2-Llama using SimPO (Meng et al., 2024) with the SEA-Preference dataset. We refer to the model at this stage as Aligned-SimPO-Llama.

Final merge Lastly, we perform a merge using the DELLA-Linear method. With the original Llama-3.1-8B-Instruct model as the base for merging, we merge in Merge-2-Llama and Aligned-SimPO-Llama to produce the final model, Llama-SEA-LION-8B-IT.

# 3.2.2 Gemma-SEA-LION-9B-IT

![](images/4c339bc0c829686d5690eeb36036737c9fc9fc6bbb2f0e5a2fc19dd66e119f36.jpg)
Figure 2: Training process of Gemma-SEA-LION-9B-IT (Section 3.2.2). The post-training process comprises two stages of instruction fine-tuning, an alignment stage, and multiple merge stages. Dotted lines denote a merge stage and solid lines denote an alignment stage.

Stage 1 and Stage 2 IFT As for Llama-SEA-LION-8B-IT, we started the post-training phase with both stages of IFT, using the same datasets, on the Gemma-2-9B model (Rivière et al., 2024). We refer to the models after stage 1 and stage 2 as Stage-1-Gemma and Stage-2-Gemma, respectively.

First merge We merge Gemma-2-9B-IT (Rivière et al., 2024) and Stage-2-Gemma into Gemma-2-9B using the DELLA-Linear method. We refer to the model at this stage as Merge-1-Gemma.

Helpfulness and preference alignment Using Merge-1-Gemma as the base model, we performed one round of alignment using SimPO with the SEA-Preference dataset. We refer to the model at this stage as Aligned-SimPO-Gemma.

Final merge Finally, using the Gemma-2-9B model as the base model, we merged Merge-1-Gemma, FuseChat Gemma-2-9B-Instruct (Yang et al., 2024b), Gemma-SEA-LION-9B, and Aligned-SimPO-Gemma into it to produce the final model, Gemma-SEA-LION-9B-IT.

# 3.3 Discussion

This post-training workflow emphasizes the careful balance between general capabilities, SEA-specific linguistic fluency, and natural conversational abilities. Each step in the workflow is designed to progressively refine the model, ensuring it meets the diverse needs of users in the Southeast Asian region.

The entire post-training process for Gemma-SEA-LION-9B-IT and Llama-SEA-LION-8B-IT took approximately 1350 and 1024 GPU hours, respectively, on eight H100 GPUs. To make training efficient, all post-training steps utilize Liger Kernel (Hsu et al., 2024) for substantial memory savings of approximately $60\%$.
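Liger Kernel exposes one-line monkey patches for supported architectures; a minimal sketch of applying such a patch before loading a model is shown below. The checkpoint path is a placeholder, and whether the SEA-LION stack applies the patch this way or through its trainer integration is an assumption.

```python
from liger_kernel.transformers import apply_liger_kernel_to_llama
from transformers import AutoModelForCausalLM

# Patch Llama-architecture modules (RMSNorm, RoPE, SwiGLU, fused
# cross-entropy) with Liger's Triton kernels before instantiation.
apply_liger_kernel_to_llama()

model = AutoModelForCausalLM.from_pretrained(
    "checkpoints/Llama-SEA-LION-8B",  # placeholder path
    torch_dtype="bfloat16",
)
```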
# 4 Experimental Setup

# 4.1 Competitive methods

For the evaluation, we compared our models against well-known multilingual and SEA-language LLMs, namely SeaLLMs-v3 (Zhang et al., 2024a), Sailor2 (Team, 2024), Qwen 2.5 (Yang et al., 2024a), Gemma 2 (Rivière et al., 2024) and Llama 3.1 (Dubey et al., 2024), all of which have fewer than 10 billion parameters, similar to our models.

# 4.2 Evaluation Benchmarks

To evaluate the robustness of our proposed models, we compare them to competitors on three benchmarks.

SEA Benchmarks. We evaluated the multilingual performance of each LLM using the SEA-HELM Leaderboard (Leong et al., 2023; Susanto et al., 2025). We selected SEA-HELM because, compared with other existing benchmarks (DAMO-NLP-SG, 2024; Lovenia et al., 2024; Wang et al., 2024a), its design best reflects SEA culture and knowledge. We also evaluate on SEACrowd (Lovenia et al., 2024), a benchmark with wide coverage of SEA languages. This benchmark spans natural language understanding and generation datasets across all SEA languages.

SEA-HELM
(Columns ID/VI/TH/TA report combined NLU, NLG, NLR, and NLI scores; columns marked (IF) report instruction following.)

| Models | Average | ID | VI | TH | TA | ID (IF) | VI (IF) | TH (IF) |
|---|---|---|---|---|---|---|---|---|
| Meta-Llama-3.1-8B | 35.37 | 42.33 | 40.67 | 35.13 | 38.88 | 16.19 | 19.05 | 9.00 |
| SeaLLMs-v3-7B | 37.04 | 44.79 | 48.29 | 43.53 | 27.45 | 26.67 | 35.24 | 26.00 |
| Gemma-2-9B | 41.48 | 47.65 | 43.28 | 42.00 | 53.26 | 4.76 | 3.81 | 10.00 |
| Qwen2.5-7B | 41.98 | 51.63 | 52.17 | 46.55 | 36.60 | 31.43 | 36.19 | 30.00 |
| Sailor2-8B | 42.62 | 53.23 | 47.33 | 46.64 | 45.04 | 30.48 | 30.48 | 35.00 |
| Llama-SEA-LION-8B | 41.42 | 44.98 | 46.25 | 42.79 | 43.03 | 25.71 | 32.38 | 23.00 |
| Gemma-SEA-LION-9B | 48.67 | 57.16 | 49.39 | 47.16 | 60.56 | 25.71 | 20.00 | 27.00 |
Table 1: SEA-HELM multilingual benchmark on NLU, NLG, NLR, NLI and instruction following for base and continued pre-trained models of similar sizes.

Open LLM Leaderboard
| Models | Average | MMLU-PRO | BBH | GPQA | MATH Lvl 5 | IFEval (EN) | MUSR |
|---|---|---|---|---|---|---|---|
| Meta-Llama-3.1-8B | 13.9 | 24.95 | 25.29 | 6.32 | 5.14 | 12.7 | 8.98 |
| Sailor2-8B | 17.71 | 25.74 | 27.62 | 4.87 | 7.02 | 21.95 | 19.03 |
| Gemma-2-9B | 21.15 | 34.48 | 34.1 | 10.51 | 13.14 | 20.4 | 14.3 |
| SeaLLMs-v3-7B | 24.00 | 35.71 | 34.57 | 9.28 | 18.81 | 32.94 | 12.68 |
| Qwen2.5-7B | 24.99 | 37.39 | 35.81 | 9.96 | 18.88 | 33.74 | 14.14 |
| Llama-SEA-LION-8B | 16.61 | 27.6 | 26.04 | 7.49 | 9.89 | 16.56 | 12.07 |
| Gemma-SEA-LION-9B | 22.41 | 32.78 | 37.24 | 10.29 | 9.89 | 30.12 | 14.11 |
Table 2: Open LLM Leaderboard benchmarks across different continued pre-trained models of similar sizes.

However, due to maintenance reasons, we could not reproduce the NLG portion of SEACrowd. We therefore experiment only with the NLU benchmark (zero-shot), which comprises 131 data subsets, 7 tasks, and 31 indigenous SEA languages.

English performance. We also evaluated the English performance of the models using the Open LLM Leaderboard (HuggingFace, 2024), since English is widely used in SEA countries and the models' English understanding and knowledge therefore also need to be assessed. The leaderboard consists of six benchmarks: IFEval (Zhou et al., 2023), Big Bench Hard (Suzgun et al., 2023), MATH (Hendrycks et al., 2021), GPQA (Rein et al., 2023), MuSR (Sprague et al., 2024), and MMLU-PRO (Wang et al., 2024c). Moreover, we also evaluate the CPT models on SEA-HELM and the Open LLM Leaderboard, since these benchmarks support the evaluation of CPT models.
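Open LLM Leaderboard numbers of this kind can, in principle, be approximated locally with lm-evaluation-harness, which in recent versions bundles the six tasks above under a single task group; the snippet below is a sketch under that assumption, with a placeholder checkpoint path rather than the evaluation setup used in this paper.

```python
import lm_eval

# "leaderboard" is the lm-evaluation-harness task group that bundles
# IFEval, BBH, MATH Lvl 5, GPQA, MuSR, and MMLU-PRO.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=checkpoints/Gemma-SEA-LION-9B,dtype=bfloat16",  # placeholder
    tasks=["leaderboard"],
    batch_size="auto",
)
print(results["results"])
```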
# 5 Experimental Results

To understand the robustness and generalization of our proposed models, we conduct three studies. Section 5.1 evaluates the robustness of the continued pre-trained models using SEA-HELM and the Open LLM Leaderboard. In Section 5.2, we compare our instruction fine-tuned models with competitors on three benchmarks to demonstrate the generalization of our models. Lastly, we discuss the design choices behind our models in Section 5.3.

# 5.1 Continued Pre-Training Results

SEA performance. The CPT stage is primarily focused on gaining SEA language capabilities and knowledge. Comparing base and CPT models, as shown in Table 1, we observed average SEA-HELM performance increases of 6.05 and 7.19 over Meta-Llama-3.1-8B and Gemma-2-9B for Llama-SEA-LION-8B and Gemma-SEA-LION-9B, respectively. We observed a much larger average increase in instruction-following capabilities in particular, which we attribute to the fact that our CPT models are trained from the instruction models rather than from the base models. Moreover, in terms of average performance, our Gemma-SEA-LION-9B model performs best among the compared models. This is a strong reason to perform CPT to improve performance on SEA languages, rather than skipping CPT and performing SFT directly.

SEA-HELM

(Columns ID/VI/TH/TA report combined NLU, NLG, NLR, and NLI scores; (IF) columns report instruction following; (MT) columns report SEA-MTBench.)

| Models | Average | ID | VI | TH | TA | ID (IF) | VI (IF) | TH (IF) | ID (MT) | VI (MT) | TH (MT) |
|---|---|---|---|---|---|---|---|---|---|---|---|
| SeaLLMs-v3-7B-Chat | 39.19 | 42.72 | 48.50 | 42.59 | 12.06 | 57.14 | 53.33 | 47.00 | 59.81 | 65.24 | 56.59 |
| Llama-3.1-8B-Instruct | 41.48 | 51.50 | 51.31 | 45.32 | 15.40 | 77.14 | 75.24 | 63.00 | 56.38 | 57.59 | 54.34 |
| Sailor2-8B-Chat | 43.13 | 48.98 | 48.01 | 45.44 | 28.29 | 49.52 | 45.71 | 40.00 | 69.76 | 66.97 | 73.94 |
| Qwen2.5-7B-Instruct | 44.58 | 60.28 | 53.46 | 53.43 | 21.03 | 81.90 | 69.52 | 66.00 | 65.66 | 66.80 | 68.71 |
| Gemma-2-9B-IT | 55.33 | 64.04 | 59.86 | 57.22 | 52.28 | 88.57 | 78.10 | 71.00 | 68.78 | 68.37 | 73.51 |
| Stage-1-Llama | 50.76 | 51.84 | 51.83 | 46.23 | 27.53 | 69.52 | 73.33 | 59.00 | 42.74 | 46.41 | 46.46 |
| Stage-2-Llama | 59.49 | 53.87 | 55.18 | 50.92 | 44.80 | 77.14 | 76.19 | 67.00 | 50.90 | 53.72 | 46.97 |
| Merge-1-Llama | 59.36 | 56.73 | 56.82 | 51.71 | 46.63 | 81.90 | 82.86 | 67.00 | 57.04 | 54.01 | 50.28 |
| Merge-2-Llama | 58.01 | 59.19 | 52.63 | 51.89 | 35.40 | 87.62 | 80.95 | 78.00 | 56.38 | 59.32 | 58.86 |
| Aligned-SimPO-Llama | 51.30 | 54.86 | 51.69 | 46.77 | 26.40 | 82.86 | 80.00 | 68.00 | 68.20 | 64.68 | 64.92 |
| Llama-SEA-LION-8B-IT | 61.84 | 60.50 | 61.48 | 55.92 | 43.61 | 84.76 | 85.71 | 76.00 | 62.65 | 68.32 | 65.13 |
| Stage-1-Gemma | 56.56 | 55.06 | 54.51 | 51.96 | 42.74 | 66.67 | 74.29 | 61.00 | 47.35 | 47.26 | 55.05 |
| Stage-2-Gemma | 66.66 | 64.10 | 61.76 | 56.90 | 57.85 | 89.52 | 82.86 | 76.00 | 60.54 | 58.93 | 58.76 |
| Merge-1-Gemma | 69.26 | 66.25 | 64.95 | 59.74 | 60.41 | 89.52 | 91.43 | 82.00 | 66.45 | 64.47 | 65.00 |
| Aligned-SimPO-Gemma | 69.37 | 65.69 | 65.47 | 59.51 | 57.38 | 86.67 | 88.57 | 78.00 | 68.89 | 73.67 | 73.51 |
| Gemma-SEA-LION-9B-IT | 69.35 | 66.26 | 64.93 | 59.23 | 58.82 | 94.29 | 88.57 | 78.00 | 65.85 | 73.27 | 69.07 |
![](images/95807f122e753274efc4c43da49e9d40d32d3ff051642f5e33ffc65fc2dd8d5a.jpg)
Figure 3: Zero-shot model performance across NLU tasks in SEA languages.

Table 3: SEA-HELM multilingual benchmark on NLU, NLG, NLR, NLI, instruction following and multi-turn chat on instruct models of similar sizes.

| Model | NLU Score |
|---|---|
| SeaLLMs-v3-7B-Chat | 52.68 |
| Llama-3.1-8B-Instruct | 49.94 |
| Sailor2-8B-Chat | 60.21 |
| Qwen2.5-7B-Instruct | 54.51 |
| Gemma-2-9B-IT | 60.21 |
| Llama-SEA-LION-8B-IT | 55.10 |
| Gemma-SEA-LION-9B-IT | 64.13 |
Table 4: Average NLU performance across 131 data subsets and 31 indigenous languages.

English performance. For English, as shown in Table 2, both CPT models also perform competitively against the Meta-Llama-3.1-8B and Gemma-2-9B base models on the Open LLM Leaderboard benchmarks. This indicates that our choice of retaining a proportion of $25\%$ English tokens has been beneficial in mitigating the catastrophic forgetting that has been shown to stem from CPT (Zheng et al., 2024a). Although our CPT models score lower than Qwen and SeaLLMs on this benchmark, we outperform them on SEA languages, which are the main focus of this work.

# 5.2 Instruction Fine-tuning Results

In this study, we compare our models with competitors on SEA-HELM, SEACrowd, and the Open LLM Leaderboard as follows.

SEA-HELM. As shown in Table 3, the SEA-HELM benchmark results demonstrate that our instruct models, Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, attain competitive performance in SEA languages, with Gemma-SEA-LION-9B-IT achieving one of the highest average performances. Moreover, we significantly improve the performance of Llama-3.1-8B-Instruct from 41.48 to 61.84 with Llama-SEA-LION-8B-IT, while Gemma-SEA-LION-9B-IT improves on Gemma-2-9B-IT by 14.02 points. Both Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT outperform other SEA-focused LLMs, such as *Sailor2-8B-Chat* and *SeaLLMs-v3-7B-Chat*, with an average score of 69.35 across all the languages covered by the SEA-HELM benchmark, apart from the SEA-MTBench tasks. This is consistent with the results on the CPT models (Section 5.1): our CPT model performs best on SEA languages, and the resulting instruct model is the best performer in this experiment.

Open LLM Leaderboard
| Models | Average | MMLU-PRO | BBH | GPQA | MATH Lvl 5 | IFEval (EN) | MUSR |
|---|---|---|---|---|---|---|---|
| Sailor2-8B-Chat | 16.37 | 27.93 | 27.15 | 3.47 | 0.00 | 37.49 | 2.19 |
| SeaLLMs-v3-7B-Chat | 22.49 | 33.93 | 24.37 | 7.27 | 15.86 | 44.10 | 9.38 |
| Llama-3.1-8B-Instruct | 27.88 | 29.36 | 26.10 | 10.63 | 17.45 | 77.03 | 6.75 |
| Qwen2.5-7B-Instruct | 27.93 | 37.00 | 34.72 | 10.18 | 0.00 | 76.34 | 9.34 |
| Gemma-2-9B-IT | 28.86 | 31.95 | 42.14 | 14.77 | 0.23 | 74.36 | 9.74 |
| Stage-1-Llama | 24.51 | 25.87 | 26.32 | 7.83 | 19.26 | 62.89 | 4.88 |
| Stage-2-Llama | 27.75 | 28.10 | 24.64 | 7.72 | 19.56 | 78.78 | 7.74 |
| Merge-1-Llama | 27.49 | 27.47 | 26.22 | 8.28 | 19.79 | 76.16 | 7.04 |
| Merge-2-Llama | 29.96 | 29.92 | 28.78 | 9.96 | 19.94 | 82.61 | 8.54 |
| Aligned-SimPO-Llama | 30.58 | 30.84 | 34.31 | 8.39 | 26.59 | 75.76 | 7.61 |
| Llama-SEA-LION-8B-IT | 30.39 | 31.01 | 29.47 | 10.40 | 22.58 | 80.35 | 8.54 |
| Stage-1-Gemma | 29.88 | 33.34 | 38.51 | 10.74 | 24.17 | 56.87 | 15.66 |
| Stage-2-Gemma | 33.48 | 34.67 | 36.06 | 11.74 | 20.77 | 83.00 | 14.61 |
| Merge-1-Gemma | 35.15 | 36.22 | 41.42 | 15.32 | 26.28 | 82.09 | 9.59 |
| Aligned-SimPO-Gemma | 35.31 | 37.65 | 42.38 | 14.99 | 27.79 | 80.23 | 8.82 |
| Gemma-SEA-LION-9B-IT | 35.43 | 36.94 | 43.39 | 15.10 | 24.24 | 81.85 | 11.07 |
Table 5: Open LLM Leaderboard benchmarks across different instruct models of similar sizes.

SEACrowd. Besides the SEA languages covered by SEA-HELM, we also evaluated our models against competitors on 31 indigenous SEA languages using SEACrowd-NLU. Note that, for this study, we use only the best settings of our models from the previous experiment (Table 3). As shown in Table 4, Gemma-SEA-LION-9B-IT achieves a state-of-the-art result of 64.13 points on the NLU benchmark, while Llama-SEA-LION-8B-IT improves over its baseline from 49.94 to 55.10 points. Moreover, the results in Figure 3 further underline the robustness of our model, which reaches more than 80 points on this benchmark in many cases, while SeaLLMs and Llama-3.1 exceed 80 points in only a few cases. These results demonstrate the robustness of our models, which achieve state-of-the-art performance with fewer than 10B parameters on SEA benchmarks, covering both a classical NLP benchmark (SEACrowd-NLU) and a modern LLM benchmark (SEA-HELM).

English performance. We also evaluate performance on English, a widely used language in the region, to observe the difference between SEA and English results. The Open LLM Leaderboard results are shown in Table 5. Both Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT performed competitively on English language, math, and reasoning tasks, with Gemma-SEA-LION-9B-IT achieving the highest average score of 35.43. Moreover, we notice that the SEA-focused models (Sailor and SeaLLMs) underperform on the English benchmarks, possibly because they are optimized for SEA languages during supervised fine-tuning at the expense of English performance. In contrast, our models balance SEA and English knowledge, resulting in high scores across all benchmarks.

# 5.3 Performance Analysis

In this study, we discuss the performance improvement from each design decision behind our models (Tables 3 and 5).

Stage 1: English instruction fine-tuning In Stage 1 IFT, the focus is predominantly on gaining general capabilities in math, code, and general instruction following in English. Although our CPT models are based on the instruct version of Llama-3.1-8B, the CPT process eroded their instruction-following capabilities (see Table 5). We observe increases of 3.86 and 9.72 in English instruction following on the IFEval benchmark for Stage-1-Llama and Stage-1-Gemma, respectively. We also observe average SEA-HELM increases of 7.9 for Stage-1-Llama and 7.47 for Stage-1-Gemma.

Stage 2: Multilingual instruction fine-tuning In Stage 2 IFT, the focus is on multilingual and reasoning capabilities. By instruction fine-tuning on SEA languages and higher-complexity English instruction pairs, the Stage 2 models saw average SEA-HELM increases over the Stage 1 models of 8.73 for Stage-2-Llama and 10.1 for Stage-2-Gemma.

Merge 1: Combining Stage 1 and Stage 2 Despite the significant gains observed in Stages 1 and 2, the effects of catastrophic forgetting from earlier stages could still be observed after Stage 2. To mitigate this, we merge the Stage 1 and Stage 2 models into the CPT model, after which we observed an average increase of 2.6 for Merge-1-Gemma. We also observed an increase across all SEA-HELM benchmark tasks for Merge-1-Llama.

Merge 2: Incorporating instruct models To reintroduce the helpfulness, relevance, and informativeness of responses observed in the Llama 3.1 and Gemma 2 models, we perform further merges with open-source instruct models. While we observed significant increases in MT-Bench scores for Vietnamese and Thai, we also observed a slight degradation of average SEA-HELM performance as well as of the Indonesian MT-Bench score, which we view as acceptable trade-offs for the significant gains in Vietnamese and Thai.

Alignment steps In the alignment step, which aligns the models to human preferences, we prioritize SEA-MTBench performance over the other SEA-HELM benchmark tasks. We observed a broad increase in SEA-MTBench performance across all languages for both models. However, this comes with minor degradation of instruction-following capabilities and of overall Indonesian SEA-HELM performance. The alignment step encourages longer, more helpful, and more sensitive responses but hurts performance on task-specific benchmarks and instruction following in some languages, an issue we address in the next step.
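For reference, the SimPO objective used in the alignment step is a reference-model-free preference loss over length-normalized log-likelihoods; with chosen response $y_w$, rejected response $y_l$, scaling factor $\beta$, and target reward margin $\gamma$, it can be written (following Meng et al., 2024) as:

$$\mathcal{L}_{\text{SimPO}} = -\log \sigma\!\left(\frac{\beta}{|y_w|}\log \pi_\theta(y_w \mid x) - \frac{\beta}{|y_l|}\log \pi_\theta(y_l \mid x) - \gamma\right)$$

The length normalization discourages the degenerate preference for longer responses, which is consistent with the longer-but-helpful behavior observed above.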
+ +Merge 2: Incorporating instruct models To reintroduce helpfulness, relevance and informativeness of responses observed in Llama 3.1 and Gemma 2 models, we perform further merges of open-source instruct models. While we observed significant increases in MT-Bench benchmark scores for Vietnamese and Thai, we also observed a slight degradation of average SEA-HELM performance as well as a slight degradation of Indonesian MT-Bench scores, which we view as acceptable trade-offs for the significant performance increases in Vietnamese and Thai. + +Alignment steps In the alignment step to align the models to human preference, we prioritize the SEA MTBench performance over the other SEA-HELM benchmark tasks. We observed a broad increase in SEA MTBench performances across all languages for both models. However, this comes with minor degradation of instruction following capabilities and overall Indonesian SEA-HELM performance. The alignment step encourages longer, more helpful and sensitive responses but hurts performance on task-specific benchmarks and instruction following in some languages – an issue we address in the next step. + +Final merge: Combining aligned models To compensate for the capability degradation in the previous steps, we merge Merge-2-Llama and Merge-1-Gemma with Aligned-SimPO-Llama and Aligned-SimPO-Gemma and various open sourced pretrained models describe in sections 3.2.1 and 3.2.2 for their respective model families. For Llama-SEA-LION-8B-IT, we observed a significant increase in average SEA-HELM performance (61.84) from the alignment stage (51.30), mainly from the increase in performance for the core tasks in SEA-HELM. This performance increase demonstrates the value of empirical selection of pre-trained models to be merged in based on each model's strengths and weaknesses to produce a far superior model. For Gemma-SEA-LION-9B-IT, it easily achieves higher performance compared to the Llama-SEA-LION-8B-IT with fewer post training steps. We attribute this performance to the high performance of the base Gemma 2 model and also to the larger vocab + +ulary size which have been demonstrated (Takase et al., 2024) to produce better models. + +# 6 Related Works + +Recently, researchers have proposed large language models that support multilingual settings. Llama (Dubey et al., 2024) is the prior effort to release an open-source large language model for the research community to develop their own models. Then, Qwen (Yang et al., 2024a) and Gemma (Rivière et al., 2024) introduced open-source LLMs that perform comparably or better than Llama with a larger amount of training data and many supported languages for these recent models. Massively multilingual open-source models like Bloom (Scao et al., 2022) and Aya (Ustun et al., 2024) also support a very wide range of languages, including some SEA languages. Although these models demonstrate a robust performance in English benchmarks, they mostly underperformed on SEA benchmarks that tested for SEA languages, SEA knowledge and cultural understanding (Lovenia et al., 2024; Susanto et al., 2025), presumably due to a lack of language support for certain SEA languages or cultures. + +In the SEA community, many works propose a large language model that is designed specifically for SEA languages by adding more SEA tokens in the training process, such as SeaLLMs (Nguyen et al., 2024) and Sailor (Sailor2 Team, 2024). 
However, the performance of these models is robust only on in-domain datasets, or favors only some tasks (i.e., classical NLP datasets). This is because the design choices in the pre-training or fine-tuning of these models are not well studied; for example, performing a single SFT step with low-quality datasets written in some SEA languages yields only slight improvements on SEA benchmarks. To create a robust SEA LLM, we need to carefully balance language representation and design both pre-training and post-training (i.e., SFT, alignment, and model merging) for SEA contexts.

# 7 Conclusion

Despite the sizable population and language diversity of Southeast Asia, there remains a scarcity of resources and of accurate linguistic and cultural representation in open-source LLMs. In this paper, we introduce Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, two multilingual LLMs based on the Llama and Gemma families of LLMs and comprehensively trained to achieve state-of-the-art performance in SEA languages. SEA-LION represents the next advancement in the development of LLMs that explicitly support SEA languages. Both models are fully open-source and available for commercial use, to increase accessibility and innovation in multilingual LLMs in Southeast Asia. We will make our resources publicly available, including the dataset, training scripts, training checkpoints, and all fine-tuned models, even those that achieve state-of-the-art performance on the benchmarks, to establish solid baselines, ensure reproducibility, and support future research focused on culturally and professionally relevant SEA applications.

# Acknowledgment

This research is supported by the National Research Foundation, Singapore, under its National Large Language Models Funding Initiative. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not reflect the views of the National Research Foundation, Singapore.

# Limitation

Although we propose state-of-the-art SEA LLMs, the available benchmarks do not cover all the properties and languages we would like to evaluate. For example, SEA-HELM is a robust benchmark but covers only four languages, while SEACrowd covers all SEA languages but contains only classical NLP datasets (no chat or instruction-following datasets). A more holistic SEA benchmark covering LLM-specific tasks in all SEA languages is needed. Nevertheless, given the current state of evaluation, these benchmarks are the best available choices for SEA research.

Moreover, we conduct experiments using only 8- and 9-billion-parameter models. We argue that this is the most commonly used model size in real-world scenarios. In addition, our method should also work at larger or smaller model sizes, since our proposed techniques do not rely on model size, as demonstrated by applying the same SFT and alignment techniques to both the Llama and Gemma models.

# References

SCB 10X, VISTEC, and SEACrowd. 2024. Thai LLM leaderboard.
+Arcee-AI. 2024. Llama-3.1-supernova-lite. +Adrien Barbaresi. 2021. Trafilatura: A Web Scraping Library and Command-Line Tool for Text Discovery and Extraction. In Proceedings of the Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: System Demonstrations, pages 122-131. Association for Computational Linguistics. +BAAI Beijing Academy of Artificial Intelligence. 2024. Infinity instruct. +Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual. +CommonCrawl. 2024. Commoncrawl. +Ganqu Cui, Lifan Yuan, Ning Ding, Guanming Yao, Bingxiang He, Wei Zhu, Yuan Ni, Guotong Xie, Ruobing Xie, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2024. ULTRAFEEDBACK: boosting language models with scaled AI feedback. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net. +DAMO-NLP-SG. 2024. Seaexam. +Longxu Dou, Qian Liu, Guangtao Zeng, Jia Guo, Jiahui Zhou, Wei Lu, and Min Lin. 2024. *Sailor: Open language models for south-east asia. CoRR*, abs/2404.03608. +Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang, + +Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, Aurélien Rodriguez, Austen Gregerson, Ava Spataru, Baptiste Rozière, Bethany Biron, Binh Tang, Bobbie Chern, Charlotte Caucheteux, Chaya Nayak, Chloe Bi, Chris Marra, Chris McConnell, Christian Keller, Christophe Touret, Chunyang Wu, Corinne Wong, Cristian Canton Ferrer, Cyrus Nikolaidis, Damien Alonsius, Daniel Song, Danielle Pintz, Danny Livshits, David Esiobu, Dhruv Choudhary, Dhruv Mahajan, Diego Garcia-Olano, Diego Perino, Dieuwke Hupkes, Egor Lakomkin, Ehab AlBadawy, Elina Lobanova, Emily Dinan, Eric Michael Smith, Filip Radenovic, Frank Zhang, Gabriel Synnaeve, Gabrielle Lee, Georgia Lewis Anderson, Graeme Nail, Grégoire Mialon, Guan Pang, Guillem Cucurell, Hailey Nguyen, Hannah Korevaar, Hu Xu, Hugo Touvron, Iliyan Zarov, Imanol Arrieta Ibarra, Isabel M. Kloumann, Ishan Misra, Ivan Evtimov, Jade Copet, Jaewon Lee, Jan Geffert, Jana Vranes, Jason Park, Jay Mahadeokar, Jeet Shah, Jelmer van der Linde, Jennifer Billock, Jenny Hong, Jenya Lee, Jeremy Fu, Jianfeng Chi, Jianyu Huang, Jiawen Liu, Jie Wang, Jiecao Yu, Joanna Bitton, Joe Spisak, Jongsoo Park, Joseph Rocca, Joshua Johnstun, Joshua Saxe, Junteng Jia, Kalyan Vasuden Alwala, Kartikeya Upasani, Kate Plawiak, Ke Li, Kenneth Heafield, Kevin Stone, and et al. 2024. The llama 3 herd of models. CoRR abs/2407.21783. + +Wikipedia Foundation. 2024. Wikipedia enterprise. html dumps downloads. 
+ +Charles Goddard, Shamane Siriwardhana, Malikeh Ehghaghi, Luke Meyers, Vladimir Karpukhin, Brian Benedict, Mark McQuade, and Jacob Solawetz. 2024. Arcee's mergekit: A toolkit for merging large language models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: EMNLP 2024 - Industry Track, Miami, Florida, USA, November 12-16, 2024, pages 477-485. Association for Computational Linguistics. + +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021. Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual. + +Pin-Lun Hsu, Yun Dai, Vignesh Kothapalli, Qingquan Song, Shao Tang, Siyu Zhu, Steven Shimizu, Shivam Sahni, Haowen Ning, and Yanning Chen. 2024. Liger kernel: Efficient triton kernels for lIm training. arXiv preprint arXiv:2410.10989. + +Shengding Hu, Yuge Tu, Xu Han, Chaoqun He, Ganqu Cui, Xiang Long, Zhi Zheng, Yewei Fang, Yuxiang Huang, Weilin Zhao, Xinrong Zhang, Zhen Leng Thai, Kai Zhang, Chongyi Wang, Yuan Yao, Chenyang Zhao, Jie Zhou, Jie Cai, Zhongwu Zhai, Ning Ding, Chao Jia, Guoyang Zeng, Dahai Li, Zhiyuan Liu, and Maosong Sun. 2024. Minicpm: Un + +veiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395. + +HuggingFace. 2024. Open llm leaderboard. + +Gabriel Ilharco, Marco Túlio Ribeiro, Mitchell Wortsman, Ludwig Schmidt, Hannaneh Hajishirzi, and Ali Farhadi. 2023. Editing models with task arithmetic. In *The Eleventh International Conference on Learning Representations*, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net. + +Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2017. Bag of tricks for efficient text classification. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 427-431. Association for Computational Linguistics. + +Wei Qi Leong, Jian Gang Ngui, Yosephine Susanto, Hamsawardhini Rengarajan, Kengatharayer Sarveswaran, and William-Chandra Tjhi. 2023. BHASA: A holistic southeast asian linguistic and cultural evaluation suite for large language models. CoRR, abs/2309.06085. + +Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net. + +Holy Lovenia, Rahmad Mahendra, Salsabil Maulana Akbar, Lester James V. Miranda, Jennifer Santoso, Elyanah Aco, Akhdan Fadhilah, Jonibek Mansurov, Joseph Marvin Imperial, Onno Kampman, Joel Ruben Antony Moniz, Muhammad Ravi Shulthan Habibi, Frederikus Hudi, Jann Montalan, Ryan Hadiwijaya, Joanito Agili Lopo, William Nixon, Borje Karlsson, James Jaya, Ryandito Diandaru, Yuze Gao, Patrick Amadeus Irawan, Bin Wang, Jan Christian Blaise Cruz, Chenxi Whitehouse, Ivan Halim Parmonangan, Maria Khelli, Wenyu Zhang, Lucky Susanto, Reynard Adha Ryanda, Sonny Lazuardi Hermawan, Dan John Velasco, Muhammad Dehan Al Koutsar, Willy Fitra Hendria, Yasmin Moslem, Noah Flynn, Muhammad Farid Adilazuarda, Haochen Li, Johannes Lee, R. Damanhuri, Shuo Sun, Muhammad Reza Qorib, Amirbek Djanibekov, Wei Qi Leong, Quyet V. 
Do, Niklas Muennighoff, Tanrada Pansuwan, Ilham Firdausi Putra, Yan Xu, Ngee Tai Chia, Ayu Purwarianti, Sebastian Ruder, William-Chandra Tjhi, Peerat Limkonchotiwat, Alham Fikri Aji, Sedrick Keh, Genta Indra Winata, Ruochen Zhang, Fajri Koto, Zheng Xin Yong, and Samuel Cahyawijaya. 2024. Seacrowd: A multilingual multimodal data hub and benchmark suite for southeast asian languages. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami FL, USA, November 12-16, 2024, pages 5155-5203. Association for Computational Linguistics. + +Anton Lozhkov, Raymond Li, Loubna Ben Allal, Federico Cassano, Joel Lamy-Poirier, Nouamane Tazi, + +Ao Tang, Dmytro Pykhtar, Jiawei Liu, Yuxiang Wei, Tianyang Liu, Max Tian, Denis Kocetkov, Arthur Zucker, Younes Belkada, Zijian Wang, Qian Liu, Dmitry Abulkhanov, Indraneil Paul, Zhuang Li, Wen-Ding Li, Megan Risdal, Jia Li, Jian Zhu, Terry Yue Zhuo, Evgenii Zheltonozhskii, Nii Osae Osae Dade, Wenhao Yu, Lucas Krauß, Naman Jain, Yixuan Su, Xuanli He, Manan Dey, Edoardo Abati, Yekun Chai, Niklas Muennighoff, Xiangru Tang, Muhtasham Oblokulov, Christopher Akiki, Marc Marone, Cheng-hao Mou, Mayank Mishra, Alex Gu, Binyuan Hui, Tri Dao, Armel Zebaze, Olivier Dehaene, Nicolas Patry, Canwen Xu, Julian J. McAuley, Han Hu, Torsten Scholak, Sébastien Paquet, Jennifer Robinson, Carolyn Jane Anderson, Nicolas Chapados, and et al. 2024. Starcoder 2 and the stack v2: The next generation. CoRR, abs/2402.19173. + +Yu Meng, Mengzhou Xia, and Danqi Chen. 2024. Simpo: Simple preference optimization with a reference-free reward. CoRR, abs/2405.14734. + +Xuan-Phi Nguyen, Wenxuan Zhang, Xin Li, Mahani Aljunied, Zhiqiang Hu, Chenhui Shen, Yew Ken Chia, Xingxuan Li, Jianyu Wang, Qingyu Tan, Liying Cheng, Guanzheng Chen, Yue Deng, Sen Yang, Chaoqun Liu, Hang Zhang, and Lidong Bing. 2024. SeaLLMs - large language models for Southeast Asia. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pages 294-304, Bangkok, Thailand. Association for Computational Linguistics. + +OpenAI. 2023. GPT-4 technical report. CoRR, abs/2303.08774. + +Guilherme Penedo, Hynek Kydlicek, Loubna Ben Allal, Anton Lozhkov, Margaret Mitchell, Colin Raffel, Leandro von Werra, and Thomas Wolf. 2024. The fine web datasets: Decanting the web for the finest text data at scale. CoRR, abs/2406.17557. + +Ivan Provilkov, Dmitrii Emelianenko, and Elena Voita. 2020. Bpe-dropout: Simple and effective subword regularization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, pages 1882-1892. Association for Computational Linguistics. + +Jeff Rasley, Samyam Rajbhandari, Olatunjri Ruwase, and Yuxiong He. 2020. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In KDD '20: The 26th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, Virtual Event, CA, USA, August 23-27, 2020, pages 3505-3506. ACM. + +David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2023. GPQA: A graduate-level google-proof q&a benchmark. CoRR, abs/2311.12022. 
+ +Morgane Rivière, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard + +Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, Johan Ferret, Peter Liu, Pouya Tafti, Abe Friesen, Michelle Casbon, Sabela Ramos, Ravin Kumar, Charline Le Lan, Sammy Jerome, Anton Tsitsulin, Nino Vieillard, Piotr Stanczyk, Sertan Girgin, Nikola Momchev, Matt Hoffman, Shantanu Thakoor, Jean-Bastien Grill, Behnam Neyshabur, Olivier Bachem, Alanna Walton, Aliaksei Severyn, Alicia Parrish, Aliya Ahmad, Allen Hutchison, Alvin Abdagic, Amanda Carl, Amy Shen, Andy Brock, Andy Coenen, Anthony Laforge, Antonia Paterson, Ben Bastian, Bilal Piot, Bo Wu, Brandon Royal, Charlie Chen, Chintu Kumar, Chris Perry, Chris Welty, Christopher A. Choquette-Choo, Danila Sinopalnikov, David Weinberger, Dimple Vijaykumar, Dominika Rogozinska, Dustin Herbison, Elisa Bandy, Emma Wang, Eric Noland, Erica Moreira, Evan Senter, Evgenii Eltsyshev, Francesco Visin, Gabriel Rasskin, Gary Wei, Glenn Cameron, Gus Martins, Hadi Hashemi, Hanna Klimczak-Plucinska, Harleen Batra, Harsh Dhand, Ivan Nardini, Jacinda Mein, Jack Zhou, James Svensson, Jeff Stanway, Jetha Chan, Jin Peng Zhou, Joana Carrasqueira, Joana Iljazi, Jocelyn Becker, Joe Fernandez, Joost van Amersfoort, Josh Gordon, Josh Lipschultz, Josh Newlan, Ju-yeong Ji, Kareem Mohamed, Kartikeya Badola, Kat Black, Katie Millican, Keelin McDonell, Kelvin Nguyen, Kiranbir Sodhia, Kish Greene, Lars Lowe Sjösund, Lauren Usui, Laurent Sifre, Lena Heuermann, Leticia Lago, and Lilly McNealus. 2024. Gemma 2: Improving open language models at a practical size. CoRR, abs/2408.00118. + +Sailor2 Team. 2024. Sailor2: Sailing in south-east asia with inclusive multilingual llm. + +Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, Matthias Galle, Jonathan Tow, Alexander M. Rush, Stella Biderman, Albert Webson, Pawan Sasanka Ammanamanchi, Thomas Wang, Benoit Sagot, Niklas Muennighoff, Albert Villanova del Moral, Olatunj Ruwase, Rachel Bawden, Stas Bekman, Angelina McMillan-Major, Iz Beltagy, Huu Nguyen, Lucile Saulnier, Samson Tan, Pedro Ortiz Suarez, Victor Sanh, Hugo Laurençon, Yacine Jernite, Julien Launay, Margaret Mitchell, Colin Raffel, Aaron Gokaslan, Adi Simhi, Aitor Soroa, Alham Fikri Aji, Amit Alfassy, Anna Rogers, Ariel Kreisberg Nitzav, Canwen Xu, Chenghao Mou, Chris Emezue, Christopher Klamm, Colin Leong, Daniel van Strien, David Ifeoluwa Adelani, and et al. 2022. BLOOM: A 176b-parameter open-access multilingual language model. CoRR, abs/2211.05100. + +AI Singapore SEA-LION Team. 2024. Llama3 8b cpt sea-lionv2.1 instruct. + +Luca Soldaini, Rodney Kinney, Akshita Bhagia, Dustin Schwenk, David Atkinson, Russell Authur, Ben Bogin, Khyathi Chandu, Jennifer Dumas, Yanai Elazar, Valentin Hofmann, Ananya Jha, Sachin Kumar, Li Lucy, Xinxi Lyu, Nathan Lambert, Ian Magnusson, Jacob Morrison, Niklas Muennighoff, + +Aakanksha Naik, Crystal Nam, Matthew Peters, Abhilasha Ravichander, Kyle Richardson, Zejiang Shen, Emma Strubell, Nishant Subramani, Oyvind Tafjord, Evan Walsh, Luke Zettlemoyer, Noah Smith, Hannaneh Hajishirzi, Iz Beltagy, Dirk Groeneveld, Jesse Dodge, and Kyle Lo. 2024. Dolma: an open corpus of three trillion tokens for language model pretraining research. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15725-15788, Bangkok, Thailand. Association for Computational Linguistics. 
+Zayne Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. 2024. Musr: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net. +Yosephine Susanto, Adithya Venkatadri Hulagadri, Jann Railey Montalan, Jian Gang Ngui, Xian Bin Yong, Weiqi Leong, Hamsawardhini Rengarajan, Peerat Limkonchotiwat, Yifan Mai, and William Chandra Tjhi. 2025. Sea-helm: Southeast asian holistic evaluation of language models. Preprint, arXiv:2502.14301. +Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V. Le, Ed H. Chi, Denny Zhou, and Jason Wei. 2023. Challenging big-bench tasks and whether chain-of-thought can solve them. In *Findings of the Association for Computational Linguistics: ACL* 2023, Toronto, Canada, July 9-14, 2023, pages 13003-13051. Association for Computational Linguistics. +Sho Takase, Ryokan Ri, Shun Kiyono, and Takuya Kato. 2024. Large vocabulary size improves large language models. CoRR, abs/2406.16508. +Sailor Team. 2024. Sailor2: Sailing in south-east asia with inclusive multilingual llms. +The Mosaic ML Team. 2021. composer. https://github.com/mosaicml/composer/. +The Mosaic ML Team. 2022. Llm foundry. https://github.com/mosaicml/llm-foundry. +Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. 2024. Openmathinstruct-2: Accelerating AI for math with massive open-source instruction data. CoRR, abs/2410.01560. +Ahmet Üstün, Viraat Aryabumi, Zheng Xin Yong, WeiYin Ko, Daniel D'souza, Gbemileke Onilude, Neel Bhandari, Shivalika Singh, Hui-Lee Ooi, Amr Kayid, Freddie Vargus, Phil Blunsom, Shayne Longpre, Niklas Muennighoff, Marzieh Fadaee, Julia Kreutzer, and Sara Hooker. 2024. Aya model: An instruction finetuned open-access multilingual language model. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August + +11-16, 2024, pages 15894-15939. Association for Computational Linguistics. +Bin Wang, Zhengyuan Liu, Xin Huang, Fangkai Jiao, Yang Ding, AiTi Aw, and Nancy Chen. 2024a. Sealeval for multilingual foundation models: From crosslingual alignment to cultural reasoning. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pages 370-390. Association for Computational Linguistics. +Ke Wang, Nikolaos Dimitriadis, Guillermo Ortiz-Jiménez, François Fleuret, and Pascal Frossard. 2024b. Localizing task information for improved model merging and compression. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net. +Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. 2024c. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. CoRR, abs/2406.01574. +Chris Wendler, Veniamin Veselovsky, Giovanni Monea, and Robert West. 2024. Do llamas work in english? on the latent language of multilingual transformers. 
In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pages 15366-15394. Association for Computational Linguistics. +Mitchell Wortsman, Peter J. Liu, Lechao Xiao, Katie E. Everett, Alexander A. Alemi, Ben Adlam, John D. Co-Reyes, Izzeddin Gur, Abhishek Kumar, Roman Novak, Jeffrey Pennington, Jascha Sohl-Dickstein, Kelvin Xu, Jaehoon Lee, Justin Gilmer, and Simon Kornblith. 2024. Small-scale proxies for large-scale transformer training instabilities. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. Open-Review.net. +Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuchen Lin. 2024. Magpie: Alignment data synthesis from scratch by prompting aligned llms with nothing. CoRR, abs/2406.08464. +An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jianxin Yang, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize + +Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Xuejing Liu, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, Zhifang Guo, and Zhihao Fan. 2024a. Qwen2 technical report. CoRR, abs/2407.10671. +Ziyi Yang, Fanqi Wan, Longguang Zhong, Tianyuan Shi, and Xiaojun Quan. 2024b. Weighted-reward preference optimization for implicit model fusion. CoRR, abs/2412.03187. +Wei Jie Yeo, Teddy Ferdinan, Przemyslaw Kazienko, Ranjan Satapathy, and Erik Cambria. 2024. Self-training large language models through knowledge detection. In *Findings of the Association for Computational Linguistics: EMNLP* 2024, Miami, Florida, USA, November 12-16, 2024, pages 15033-15045. Association for Computational Linguistics. +Le Yu, Bowen Yu, Haiyang Yu, Fei Huang, and Yongbin Li. 2024. Language models are super mario: Absorbing abilities from homologous models as a free lunch. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net. +Wenxuan Zhang, Hou Pong Chan, Yiran Zhao, Mahani Aljunied, Jianyu Wang, Chaoqun Liu, Yue Deng, Zhiqiang Hu, Weiwen Xu, Yew Ken Chia, Xin Li, and Lidong Bing. 2024a. Seallms 3: Open foundation and chat multilingual large language models for southeast asian languages. CoRR, abs/2407.19672. +Xulang Zhang, Rui Mao, and Erik Cambria. 2024b. Multilingual emotion recognition: Discovering the variations of lexical semantics between languages. In International Joint Conference on Neural Networks, IJCNN 2024, Yokohama, Japan, June 30 - July 5, 2024, pages 1-9. IEEE. +Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, Alban Desmaison, Can Balioglu, Pritam Damania, Bernard Nguyen, Geeta Chauhan, Yuchen Hao, Ajit Mathews, and Shen Li. 2023. Pytorch FSDP: experiences on scaling fully sharded data parallel. Proc. VLDB Endow., 16(12):3848-3860. +Wenzhen Zheng, Wenbo Pan, Xu Xu, Libo Qin, Li Yue, and Ming Zhou. 2024a. 
Breaking language barriers: Cross-lingual continual pre-training at scale. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pages 7725-7738. Association for Computational Linguistics. +Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024b. Llamafactory: Unified efficient fine-tuning of $100+$ language models. In Proceedings of the + +62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand. Association for Computational Linguistics. +Chengzhi Zhong, Fei Cheng, Qianying Liu, Junfeng Jiang, Zhen Wan, Chenhui Chu, Yugo Murawaki, and Sadao Kurohashi. 2024. Beyond english-centric llms: What language do multilingual language models think in? CoRR, abs/2408.10811. +Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. 2023. Instruction-following evaluation for large language models. CoRR, abs/2311.07911. + +# A Appendix + +# A.1 Continued pre-training (CPT) data + +Existing data: We utilize existing datasets as shown in Table 6 (HuggingFace Datasets). + +Other data: As shown in Table 6 (the other data section), the listed datasets contain data from a diverse range of domains, including news, books, articles, poems, etc. + +
Continued Pre-training Data

| Source (HuggingFace Datasets) | Languages | Size (Billions of Tokens) |
|---|---|---|
| bigcode/the-stack-v2-dedup | CODE | 40 |
| allenai/dolma | EN | 37.5 |
| HuggingFaceFW/fineweb-edu | EN | 7.5 |
| aisingapore/SEA-PILE-v1 | SEA | 47.58 |
| aisingapore/SEA-PILE-v2 | ID | 7 |
| **Source (Others)** | **Languages** | **Size (Billions of Tokens)** |
| VinBigData | VI | 16 |
| WangChanBERTa | TH | 8.5 |
| Others - EN | EN | 5 |
| Others - SEA | SEA | 30.92 |
Table 6: List of datasets for the continued pre-training stage.

# A.2 Stage 1 IFT data

Stage 1 IFT Datasets
| Source (HuggingFace Datasets) | Languages | Size |
|---|---|---|
| BAAI/Infinity-Instruct | EN | 7,449,106 |
| nvidia/OpenMathInstruct-2 | EN | 2,000,000 |
Table 7: List of datasets for Stage-1 IFT. For the BAAI/Infinity-Instruct dataset, any conversation that originally ended with a user turn has had that last turn removed.

# A.3 Stage 2 IFT data

Existing data: We utilize existing datasets as shown in Table 9 (HuggingFace Datasets).

Synthetic data: As shown in Table 9 (the generated part), the synthetic data are formulated as follows:

- qwen_gemma_synthetic datasets are generated first in English with Qwen 32B, using an approach similar to Magpie (Xu et al., 2024). Instructions are then translated into the target language with Gemma 2 27B.
- llama_gemma_synthetic datasets are generated first in English with Llama 3.1 70B, using an approach similar to Magpie (Xu et al., 2024). Instructions are then translated into the target language with Gemma 2 27B.
- gemma_synthetic datasets are generated directly with Gemma 2 27B using Magpie (Xu et al., 2024).
- sea_multilingual_systemchat is a synthetic dataset translated with Gemma 2 27B from the English systemchat dataset.
- rewritten_oasst is a dataset rewritten with Gemma 2 27B based on the English OASST dataset.
- rewritten_helpsteer is a dataset rewritten with Gemma 2 27B based on the English HelpSteer dataset.

# A.4 Helpfulness and preference alignment data

As shown in Table 8, we use princeton-nlp/gemma2-ultrafeedback-armorm as the source of the alignment data, and re-score it with the reward model nvidia/Llama-3.1-Nemotron-70B-Reward to create the SEA version. In particular, generated-gemma2-27b-seapref-nemotron-70b takes prompts from seald, wangchan_thainstruct, and additional hand-written Southeast Asian cultural prompts collected from native speakers, and then generates responses from them (at varying temperatures) with Gemma 2 27B. The responses are then scored with nvidia/Llama-3.1-Nemotron-70B-Reward, with the top-scoring response selected as chosen and the lowest-scoring as rejected, similar to princeton-nlp/gemma2-ultrafeedback-armorm.
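The following is a minimal sketch of the preference-pair construction just described: sample several responses per prompt at varying temperatures, score them with a reward model, and keep the best as chosen and the worst as rejected. The callables `generate` and `reward_score` are hypothetical stand-ins for Gemma 2 27B generation and Nemotron-70B reward scoring, not the actual pipeline code.

```python
import random

def build_preference_pair(prompt: str, generate, reward_score, n: int = 4):
    """Construct one (chosen, rejected) pair for a prompt.

    `generate(prompt, temperature)` and `reward_score(prompt, response)`
    are assumed callables wrapping the generator and the reward model.
    """
    # Sample n candidate responses at varying temperatures.
    responses = [generate(prompt, temperature=random.uniform(0.3, 1.0))
                 for _ in range(n)]
    # Score each candidate with the reward model, ascending order.
    scored = sorted(responses, key=lambda r: reward_score(prompt, r))
    # Highest-scoring response is "chosen", lowest is "rejected".
    return {"prompt": prompt, "chosen": scored[-1], "rejected": scored[0]}
```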
Preference Data

| Source (HuggingFace Datasets) | Languages | Size |
|---|---|---|
| princeton-nlp/gemma2-ultrafeedback-armorm | EN | 61,510 |
| **Source (Generated)** | **Languages** | **Size** |
| generated-gemma2-27b-seapref-nemotron-70b | SEA | 5,511 |
Table 8: List of preference datasets used for the alignment stage.

Stage 2 IFT Datasets
| Source (HuggingFace Datasets) | Languages | Size |
|---|---|---|
| BAAI/Infinity-Instruct* | EN | 1,456,927 |
| HuggingFaceTB/smoltalk | EN | 409,537 |
| allenai/tulu-3-sft-personas-math | EN | 149,960 |
| parinzee/seed-free-synthetic-instruct-thai-v1 | TH | 118,898 |
| HuggingFaceTB/smoltalk | EN | 96,356 |
| HuggingFaceTB/smoltalk | EN | 83,144 |
| arcee-ai/EvolKit-75K | EN | 74,174 |
| AI-MO/NuminaMath-TIR | EN | 72,441 |
| Post-training-Data-Flywheel/AutoIF-instruct-61k | EN | 61,492 |
| argilla/ifeval-like-data | EN | 56,339 |
| HuggingFaceTB/smoltalk | EN | 53,342 |
| ai2-adapt-dev/tulu_v3.9_wildjailbreak_decontaminated_50k | EN | 50,000 |
| ai2-adapt-dev/tulu_v3.9_synthetic_finalresp_wildguardmixtrain_decontaminated_50k | EN | 50,000 |
| allenai/tulu-3-sft-personas-math-grade | EN | 49,980 |
| allenai/tulu-3-sft-personas-code | EN | 34,999 |
| HuggingFaceTB/smoltalk | EN | 34,424 |
| allenai/tulu-3-sft-personas-instruction-following | EN | 29,980 |
| airesearch/WangchanThaiInstruct | TH | 25,014 |
| allenai/tulu-3-sft-personas-algebra | EN | 20,000 |
| arcee-ai/EvolKit-20k-vi | VI | 15,378 |
| allenai/coconot | EN | 10,983 |
| ai2-adapt-dev/tulu_v3.9_scirff_10k | EN | 10,000 |
| **Source (Generated)** | **Languages** | **Size** |
| qwen_gemma_synthetic_tamil | TA | 480,000 |
| qwen_gemma_synthetic_thai | TH | 480,000 |
| qwen_gemma_synthetic_indonesian | ID | 465,019 |
| qwen_gemma_synthetic_vietnamese | VI | 465,019 |
| gemma_synthetic_indonesian | ID | 458,149 |
| gemma_synthetic_filipino | TL | 455,093 |
| gemma_synthetic_viet | VI | 291,576 |
| gemma_synthetic_tamil | TA | 276,314 |
| gemma_synthetic_thai | TH | 186,339 |
| gemma_synthetic_javanese | JV | 110,000 |
| gemma_synthetic_sudanese | SU | 110,000 |
| llama_gemma_synthetic_thai | TH | 88,920 |
| llama_gemma_synthetic_tamil | TA | 88,920 |
| llama_gemma_synthetic_vietnamese | VI | 88,920 |
| llama_gemma_synthetic_javanese | JV | 88,920 |
| llama_gemma_synthetic_indonesian | ID | 88,920 |
| llama_gemma_synthetic_filipino | TL | 80,000 |
| enrich_27k | SEA | 27,463 |
| seaMultilingual_systemchat | SEA | 1,903 |
| rewritten_oasst | SEA | 841 |
| rewritten_helpsteer | SEA | 838 |
+ +Table 9: List of datasets for Stage-2-IFT. \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05747/images/0f75f24e6df40fe78b1dd64776ad1d19b8df275d0fba107911bc92e1b8175379.jpg b/data/2025/2504_05xxx/2504.05747/images/0f75f24e6df40fe78b1dd64776ad1d19b8df275d0fba107911bc92e1b8175379.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69d19ebe8fdc3b3f6008d53b745e86e6e7bf7e26 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/0f75f24e6df40fe78b1dd64776ad1d19b8df275d0fba107911bc92e1b8175379.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5c24052879c9fce04fe27ab50f90e4c583a024e28e17cce353dc80c57c05fe6 +size 182451 diff --git a/data/2025/2504_05xxx/2504.05747/images/4867daa85c3c9dd6e65602d241de43dd14c0427a828b2219e89a5413008cb6c3.jpg b/data/2025/2504_05xxx/2504.05747/images/4867daa85c3c9dd6e65602d241de43dd14c0427a828b2219e89a5413008cb6c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f592767a24e1c6b64a2bb98f3d9e32f3ca39a752 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/4867daa85c3c9dd6e65602d241de43dd14c0427a828b2219e89a5413008cb6c3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95c074a9759b545cf9a173ecd53c9f969f6677fdca1bdb49b038a6ef66a78839 +size 66671 diff --git a/data/2025/2504_05xxx/2504.05747/images/4c339bc0c829686d5690eeb36036737c9fc9fc6bbb2f0e5a2fc19dd66e119f36.jpg b/data/2025/2504_05xxx/2504.05747/images/4c339bc0c829686d5690eeb36036737c9fc9fc6bbb2f0e5a2fc19dd66e119f36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ae1bcac8d737aa9ae1c6f80f950074b14b2d214 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/4c339bc0c829686d5690eeb36036737c9fc9fc6bbb2f0e5a2fc19dd66e119f36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b790cd98a8ef3c20ce3733d32f71cee330ac199803751066d717093a1b765620 +size 42082 diff --git a/data/2025/2504_05xxx/2504.05747/images/4e16748c2d5d2a0090d54b396c70b170858f6680defc5b52a25cfe87b65aacd2.jpg b/data/2025/2504_05xxx/2504.05747/images/4e16748c2d5d2a0090d54b396c70b170858f6680defc5b52a25cfe87b65aacd2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4f1be368424e7ce5eeb604ce7661830a3ab4f0a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/4e16748c2d5d2a0090d54b396c70b170858f6680defc5b52a25cfe87b65aacd2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6714721883376e703eecb1f7c06603efd8a8284f381177e9e93711ae2c50cd8b +size 15753 diff --git a/data/2025/2504_05xxx/2504.05747/images/55f574138c852425fa5e8074e6ad64c6e4c0ac613ddfd8d9c24174ce9df33553.jpg b/data/2025/2504_05xxx/2504.05747/images/55f574138c852425fa5e8074e6ad64c6e4c0ac613ddfd8d9c24174ce9df33553.jpg new file mode 100644 index 0000000000000000000000000000000000000000..839fb32ded519fcd36fdc7e4cd98041effad773a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/55f574138c852425fa5e8074e6ad64c6e4c0ac613ddfd8d9c24174ce9df33553.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d494cb5d3b8f85cabdec1daaa3fe5582602187877195a72f87e051105b205b8 +size 13496 diff --git a/data/2025/2504_05xxx/2504.05747/images/78429de77740e6d1e87d54a49d5ac0a30a1623513b766b33f3be80741d5f7606.jpg b/data/2025/2504_05xxx/2504.05747/images/78429de77740e6d1e87d54a49d5ac0a30a1623513b766b33f3be80741d5f7606.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1941bf6e7f7edaeb6b411322096995d44b8c62f7 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05747/images/78429de77740e6d1e87d54a49d5ac0a30a1623513b766b33f3be80741d5f7606.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb30f7e7ba76b5876b24c12121ffbf93f6541ba1048aedc4b8d3b427cc1f9a00 +size 136182 diff --git a/data/2025/2504_05xxx/2504.05747/images/9138939f8b29aa04aaea835f7a6d240c1c7202541e014a22e29a7fadfe0d65fb.jpg b/data/2025/2504_05xxx/2504.05747/images/9138939f8b29aa04aaea835f7a6d240c1c7202541e014a22e29a7fadfe0d65fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f2b38f90b2bc40c40a47a749e63312adff2103f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/9138939f8b29aa04aaea835f7a6d240c1c7202541e014a22e29a7fadfe0d65fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bcb6039b27ad375b63742f55bf92cf9c3c9aaaa2f3ac38f90f115a7eaa0e0ca +size 38674 diff --git a/data/2025/2504_05xxx/2504.05747/images/95807f122e753274efc4c43da49e9d40d32d3ff051642f5e33ffc65fc2dd8d5a.jpg b/data/2025/2504_05xxx/2504.05747/images/95807f122e753274efc4c43da49e9d40d32d3ff051642f5e33ffc65fc2dd8d5a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..11b549fb3dbeffda4ed35094335d9137f4763280 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/95807f122e753274efc4c43da49e9d40d32d3ff051642f5e33ffc65fc2dd8d5a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f72dd780fbbebe34e62cbed2c58aeb0d14397e6feaefcc4b630f88cdcc1a7b9 +size 40420 diff --git a/data/2025/2504_05xxx/2504.05747/images/9f6b9f3761ad7213c0ce54d33552c028e28aae4e28023f2ef7dc3583c49097e7.jpg b/data/2025/2504_05xxx/2504.05747/images/9f6b9f3761ad7213c0ce54d33552c028e28aae4e28023f2ef7dc3583c49097e7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94b909b9da022cb708c525fb5fe7ef31b0bcbfea --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/9f6b9f3761ad7213c0ce54d33552c028e28aae4e28023f2ef7dc3583c49097e7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c3bd2776f5d79890ce859518117788c8ee9b06ba8b1e4e165a8a9199cadf174 +size 4948 diff --git a/data/2025/2504_05xxx/2504.05747/images/d1fb51f7d5203701b1c1be87c57a86727b27c69725ee0cf2689b1ea2cdbdd506.jpg b/data/2025/2504_05xxx/2504.05747/images/d1fb51f7d5203701b1c1be87c57a86727b27c69725ee0cf2689b1ea2cdbdd506.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfd5eab31db91021ebc454dd6df97dbff99fd652 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/d1fb51f7d5203701b1c1be87c57a86727b27c69725ee0cf2689b1ea2cdbdd506.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb2739dc25d5729703d4211fd0a609acca60e728fa85059f4241f9f0e5465a71 +size 42352 diff --git a/data/2025/2504_05xxx/2504.05747/images/db3a7dde14ce0b341033213c154d6a9aa45d69bffbec2d1963b8cbfc70cff39a.jpg b/data/2025/2504_05xxx/2504.05747/images/db3a7dde14ce0b341033213c154d6a9aa45d69bffbec2d1963b8cbfc70cff39a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..749082af956588573e3836ff1afe5b2275c974b0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/db3a7dde14ce0b341033213c154d6a9aa45d69bffbec2d1963b8cbfc70cff39a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:397cf7cf957640763d35fd5bb49dfc5f77bf20a5ba6912a02a943e30be7e560c +size 173130 diff --git a/data/2025/2504_05xxx/2504.05747/images/dd8f45e05294968ff296d594ea86737670808790006a8a27a017ff220a05095b.jpg 
b/data/2025/2504_05xxx/2504.05747/images/dd8f45e05294968ff296d594ea86737670808790006a8a27a017ff220a05095b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..818963349ff32b631ec6da0e0844f335dc8706ab --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/dd8f45e05294968ff296d594ea86737670808790006a8a27a017ff220a05095b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3aec199588df527d75fbc098e18879f13ed58ea7a4a0e62f3f5eedcca59ba92 +size 23003 diff --git a/data/2025/2504_05xxx/2504.05747/images/e406902fdae58a202c5ead85c91057043dad6359c378e31f2bf590aaf4e7548a.jpg b/data/2025/2504_05xxx/2504.05747/images/e406902fdae58a202c5ead85c91057043dad6359c378e31f2bf590aaf4e7548a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69d3453a6513aa46a2ad47240c37a2fc1bfa4321 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/images/e406902fdae58a202c5ead85c91057043dad6359c378e31f2bf590aaf4e7548a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ddecc5341cb76e67c8a403e327e5a79ed76aa24ae2b6a3c9272cf18828ae21e +size 99875 diff --git a/data/2025/2504_05xxx/2504.05747/layout.json b/data/2025/2504_05xxx/2504.05747/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..68f4b79af15ed0b6f1a6ff0fda10bb7b40789c0d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05747/layout.json @@ -0,0 +1,7833 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 98, + 69, + 137, + 107 + ], + "blocks": [ + { + "bbox": [ + 98, + 69, + 137, + 107 + ], + "lines": [ + { + "bbox": [ + 98, + 69, + 137, + 107 + ], + "spans": [ + { + "bbox": [ + 98, + 69, + 137, + 107 + ], + "type": "image", + "image_path": "9f6b9f3761ad7213c0ce54d33552c028e28aae4e28023f2ef7dc3583c49097e7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 142, + 80, + 493, + 98 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 80, + 493, + 98 + ], + "spans": [ + { + "bbox": [ + 142, + 80, + 493, + 98 + ], + "type": "text", + "content": "SEA-LION: Southeast Asian Languages in One Network" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 107, + 518, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 107, + 518, + 247 + ], + "spans": [ + { + "bbox": [ + 77, + 107, + 518, + 247 + ], + "type": "text", + "content": "Raymond Ng\\*, Thanh Ngan Nguyen\\*, Yuli Huang\\*, Ngee Chia Tai\\*, Wai Yi Leong\\*, Wei Qi Leong\\*, Xianbin Yong\\*, Jian Gang Ngui\\*, Yosephine Susanto\\*, Nicholas Cheng\\*, Hamsawardhini Rengarajan\\*, Peerat Limkonchotiwat\\*, Adithya Venkatadri Hulagadri\\*, Kok Wai Teng\\*, Yeo Yeow Tong\\*, Bryan Siow\\*, Wei Yi Teo\\*, Wayne Lau\\*, Choon Meng Tan\\*, Brandon Ong\\*, Zhi Hao Ong\\*, Jann Railey Montalan\\*, Adwin Chan\\*, Sajeban Antonyrex\\*, Ren Lee\\*, Esther Choa\\*, David Ong Tat-Wee\\*, Bing Jie Darius Liu\\*, William Chandra Tjhi\\*, Erik Cambria\\*, Leslie Teo\\* AI Singapore, National University of Singapore \\*Nanyang Technological University https://sea-lion.ai" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 155, + 260, + 202, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 260, + 202, + 272 + ], + "spans": [ + { + "bbox": [ + 155, + 260, + 202, + 272 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 84, + 280, + 274, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 84, + 280, + 274, + 580 + ], + "spans": [ + { + "bbox": [ + 84, + 280, + 274, + 580 + ], + "type": "text", + "content": "Recently, Large Language Models (LLMs) have dominated much of the artificial intelligence scene with their ability to process and generate natural languages. However, the majority of LLM research and development remains English-centric, leaving low-resource languages such as those in the Southeast Asian (SEA) region underrepresented. To address this representation gap, we introduce Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, two cutting-edge multilingual LLMs designed for SEA languages. The SEA-LION family of LLMs supports 11 SEA languages, namely English, Chinese, Indonesian, Vietnamese, Malay, Thai, Burmese, Lao, Filipino, Tamil, and Khmer. Our work leverages large-scale multilingual continued pre-training with a comprehensive post-training regime involving multiple stages of instruction fine-tuning, alignment, and model merging. Evaluation results on multilingual benchmarks show that our models achieve state-of-the-art performance across LLMs supporting SEA languages. We open-source the models to benefit the wider SEA community." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 587, + 154, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 587, + 154, + 599 + ], + "spans": [ + { + "bbox": [ + 68, + 587, + 154, + 599 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 608, + 291, + 757 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 608, + 291, + 757 + ], + "spans": [ + { + "bbox": [ + 67, + 608, + 291, + 757 + ], + "type": "text", + "content": "Large language models (LLMs) have significantly transformed the field of natural language processing, achieving remarkable performance in text generation, summarization and sentiment analysis (Brown et al., 2020; OpenAI, 2023; Dubey et al., 2024; Rivière et al., 2024; Zhang et al., 2024b; Yeo et al., 2024). Despite their impressive capabilities, most LLMs remain heavily English-centric (Wendler et al., 2024; Zhong et al., 2024). Unfortunately, this situation has led LLMs in regions with many under-represented languages such" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 260, + 526, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 260, + 526, + 354 + ], + "spans": [ + { + "bbox": [ + 302, + 260, + 526, + 354 + ], + "type": "text", + "content": "as Southeast Asia (SEA) to suffer. Languages with lower resources, such as Filipino, Lao, Burmese and Khmer in the SEA region, are not supported by many open-source English-centric LLMs. This underscores the need to bridge the resource and representation gap between English and SEA languages." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 358, + 526, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 358, + 526, + 641 + ], + "spans": [ + { + "bbox": [ + 302, + 358, + 526, + 641 + ], + "type": "text", + "content": "Recently, there have been many attempts to create multilingual LLMs in an open-source manner, e.g., BLOOM (Scao et al., 2022), a project aimed at increasing multilingual presence in opensource LLMs by supporting 46 languages. Popular LLM families such as Llama (Dubey et al., 2024), Gemma (Rivière et al., 2024) and Qwen (Yang et al., 2024a) have also introduced multilingual LLMs for their latest iteration. 
During our evaluations, we found that the performance of these models is acceptable in the general case, i.e., when considering evaluation benchmarks formulated from English datasets. However, we observe that the performance degrades on SEA-specific benchmarks. Moreover, researchers have also introduced LLMs such as SeaLLMs (Nguyen et al., 2024; Zhang et al., 2024a) and Sailor (Dou et al., 2024) to specifically address the LLM gap in SEA languages. However, the performance of these models is less than ideal for languages such as Thai or Tamil" + }, + { + "bbox": [ + 302, + 358, + 526, + 641 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 302, + 358, + 526, + 641 + ], + "type": "text", + "content": " (10X et al., 2024; AI Products Team, 2024)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 645, + 526, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 645, + 526, + 739 + ], + "spans": [ + { + "bbox": [ + 302, + 645, + 526, + 739 + ], + "type": "text", + "content": "In this paper, we address the issues by proposing a robust open-source Southeast Asian model with data transparency for reproducibility, namely SEA-LION - a family of LLMs continued pretrained (CPT) and fine-tuned on Llama-3.1-8B-Instruct for Llama-SEA-LION-8B-IT and Gemma2-9B for Gemma-SEA-LION-9B-IT with a focus" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 81, + 762, + 197, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 762, + 197, + 773 + ], + "spans": [ + { + "bbox": [ + 81, + 762, + 197, + 773 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 81, + 762, + 197, + 773 + ], + "type": "text", + "content": "SEA-LION Models Collection" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 752, + 525, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 752, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 752, + 525, + 775 + ], + "type": "text", + "content": "2Tamil is one of the official languages in Singapore. It is also spoken in other areas in the SEA region, such as Malaysia." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 13, + 244, + 36, + 593 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 244, + 36, + 593 + ], + "spans": [ + { + "bbox": [ + 13, + 244, + 36, + 593 + ], + "type": "text", + "content": "arXiv:2504.05747v4 [cs.CL] 30 Oct 2025" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 273 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 273 + ], + "type": "text", + "content": "on SEA languages. To tackle the performance problem, we utilize 200 billion English, code, and SEA languages tokens as well as 16.8 million English and SEA languages instruction and answer pairs for CPT and post-training steps, respectively, to achieve a significant improvement in SEA languages. In order to allow our models to be used by everyone without restrictions, we release our models under the fully open MIT license. 
We benchmark our models against the SEA-HELM(Susanto et al., 2025) and Open LLM Leaderboard3 with other LLMs of similar sizes in Southeast Asia like Sailor 2 (Team, 2024) and SeaLLMs 3 (Zhang et al., 2024a), where our models achieve state-of-the-art performances." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 275, + 290, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 275, + 290, + 301 + ], + "spans": [ + { + "bbox": [ + 67, + 275, + 290, + 301 + ], + "type": "text", + "content": "We summarize the contribution of our paper as follows." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 71, + 302, + 291, + 533 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 71, + 302, + 291, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 302, + 291, + 356 + ], + "spans": [ + { + "bbox": [ + 71, + 302, + 291, + 356 + ], + "type": "text", + "content": "- We released two LLMs, Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, that are meticulously trained to accurately represent the unique linguistic diversity of SEA languages." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 71, + 357, + 290, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 357, + 290, + 396 + ], + "spans": [ + { + "bbox": [ + 71, + 357, + 290, + 396 + ], + "type": "text", + "content": "- We also provide in-depth insights in this paper into our end-to-end training workflow to benefit the community developing multilingual LLMs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 71, + 397, + 291, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 397, + 291, + 533 + ], + "spans": [ + { + "bbox": [ + 71, + 397, + 291, + 533 + ], + "type": "text", + "content": "- We present a reproducible dataset development process, covering sourcing and the model training process. We release our training artifacts, including the training dataset, training scripts, training checkpoints, and fine-tuned models, including Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, to provide strong baselines, promote reproducibility, and enable future research on applications that require SEA-specific knowledge4." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 559, + 244, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 559, + 244, + 574 + ], + "spans": [ + { + "bbox": [ + 67, + 559, + 244, + 574 + ], + "type": "text", + "content": "2 Continued pre-training (CPT)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 582, + 177, + 595 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 582, + 177, + 595 + ], + "spans": [ + { + "bbox": [ + 67, + 582, + 177, + 595 + ], + "type": "text", + "content": "2.1 Pre-training data" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 600, + 291, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 600, + 291, + 722 + ], + "spans": [ + { + "bbox": [ + 67, + 600, + 291, + 722 + ], + "type": "text", + "content": "The CPT data consists of a curated set of English, multilingual, and code corpora from several open source repositories like Dolma (Soldaini et al., 2024), FineWeb (Penedo et al., 2024), the-stackv2 (Lozhkov et al., 2024), SEA-LION-Pile (AI Singapore, 2023), SEA-LION-Pilev2 (AI Singapore, 2025), as well as documents from CommonCrawl (CommonCrawl, 2024) and from the public domain, such as Wikipedia (Foun" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 71, + 527, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 527, + 153 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 527, + 153 + ], + "type": "text", + "content": "dation, 2024). For SEA-LION-Pilev2, we filter CommonCrawl WARC data for documents in SEA languages (i.e., Burmese, Simplified Chinese, Indonesian, Khmer, Lao, Malay, Filipino, Tamil, Thai, and Vietnamese) using the pretrained fast-text language classifier (Joulin et al., 2017)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 162, + 527, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 162, + 527, + 352 + ], + "spans": [ + { + "bbox": [ + 302, + 162, + 527, + 352 + ], + "type": "text", + "content": "A document is retained if the language code reported in its metadata matches that of one of the aforementioned SEA languages. Additionally, we further clean up the data with Trafilatura (Barbaresi, 2021). To determine the optimal dataset ratio between SEA languages, code, and English for the CPT process, we conduct a series of small-scale CPT experiments, each with a training budget of 10 billion tokens and varying proportions of English, code, and SEA language data. We settled on an optimal data mix ratio of " + }, + { + "bbox": [ + 302, + 162, + 527, + 352 + ], + "type": "inline_equation", + "content": "55\\%" + }, + { + "bbox": [ + 302, + 162, + 527, + 352 + ], + "type": "text", + "content": " SEA languages, " + }, + { + "bbox": [ + 302, + 162, + 527, + 352 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 302, + 162, + 527, + 352 + ], + "type": "text", + "content": " English, and " + }, + { + "bbox": [ + 302, + 162, + 527, + 352 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 302, + 162, + 527, + 352 + ], + "type": "text", + "content": " code tokens for a budget of 200 billion tokens. For a detailed breakdown of the token count by languages, please refer to Table 6." 
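The fastText-based language filtering described above can be sketched in a few lines. This is a minimal illustration, assuming the public lid.176 model and an arbitrary confidence threshold rather than the authors' exact classifier settings:

```python
# Minimal sketch of SEA-language document filtering with a pretrained fastText
# language-identification model (Joulin et al., 2017). The model file, label
# set, and confidence threshold are illustrative assumptions, not the authors'
# exact pipeline (which also checks CommonCrawl metadata and cleans the text
# with Trafilatura).
import fasttext

SEA_LANGS = {"my", "zh", "id", "km", "lo", "ms", "tl", "ta", "th", "vi"}

lid = fasttext.load_model("lid.176.bin")  # public fastText LID model

def keep_document(text: str, threshold: float = 0.65) -> bool:
    """Keep a document only if it is confidently in a target SEA language."""
    # fastText's predict() rejects newlines, so collapse them first.
    labels, probs = lid.predict(text.replace("\n", " "), k=1)
    lang = labels[0].removeprefix("__label__")
    return lang in SEA_LANGS and float(probs[0]) >= threshold

docs = ["Halo, apa kabar?", "Hello, how are you?"]
sea_docs = [d for d in docs if keep_document(d)]  # expected to keep only the first line
```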
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 391, + 390, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 391, + 390, + 406 + ], + "spans": [ + { + "bbox": [ + 302, + 391, + 390, + 406 + ], + "type": "text", + "content": "2.2 CPT process" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 428, + 525, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 428, + 525, + 467 + ], + "spans": [ + { + "bbox": [ + 302, + 428, + 525, + 467 + ], + "type": "text", + "content": "Model selection. For the models to CPT from, we choose Llama-3.1-8B-Instruct (Dubey et al., 2024) and Gemma-2-9B (Rivière et al., 2024)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "spans": [ + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "type": "text", + "content": "Training setup. Following previous works (Dou et al., 2024), we use BPE-Dropout (Provilkov et al., 2020) to increase the performance and robustness of the training. We use a Warmup-Stable-Decay (WSD) (Hu et al., 2024) scheduler with warm-up and cooldown phases each representing " + }, + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "type": "text", + "content": " of the entire training budget. We use the AdamW (Loshchilov and Hutter, 2019) optimizer with the maximum learning rate (LR) set to " + }, + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "type": "inline_equation", + "content": "1e^{-5}" + }, + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "type": "text", + "content": " and the final LR after cooldown is " + }, + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "type": "inline_equation", + "content": "1e^{-7}" + }, + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "type": "text", + "content": ". Following Wortsman et al. (2024), we set epsilon to " + }, + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "type": "inline_equation", + "content": "1e^{-15}" + }, + { + "bbox": [ + 302, + 477, + 527, + 776 + ], + "type": "text", + "content": ". We use Composer (Team, 2021) and LLM Foundry (Team, 2022) for distributed training using Fully Sharded Data Parallel (Zhao et al., 2023) on a cluster of eight nodes of the p5.48xlarge instance from Amazon Web Services (AWS). The total training duration was approximately 6 days and 10 days for the Llama 3.1 and Gemma 2 models, respectively. In this paper, we refer to the post-CPT models as Llama-SEA-LION-8B and Gemma-SEA-LION-9B for the Llama 3.1 and Gemma 2 continued pre-trained models, respectively." 
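The Warmup-Stable-Decay schedule described in this training setup is easy to state explicitly. A minimal sketch, assuming linear warmup and cooldown ramps (the ramp shape is not specified in the text):

```python
# Sketch of the Warmup-Stable-Decay (WSD) schedule from the training setup:
# 10% warmup to the peak LR of 1e-5, a long stable phase, then a 10% cooldown
# to the final LR of 1e-7. Linear ramps are an assumption.
PEAK_LR, FINAL_LR = 1e-5, 1e-7

def wsd_lr(step: int, total_steps: int) -> float:
    warmup = cooldown = int(0.10 * total_steps)
    stable_end = total_steps - cooldown
    if step < warmup:                                  # warmup: 0 -> peak
        return PEAK_LR * step / max(1, warmup)
    if step < stable_end:                              # stable: hold peak LR
        return PEAK_LR
    frac = (step - stable_end) / max(1, cooldown)      # cooldown: peak -> final
    return PEAK_LR + frac * (FINAL_LR - PEAK_LR)

# With PyTorch this plugs into a LambdaLR around the AdamW optimizer
# (eps=1e-15 per the text), e.g.:
#   opt = torch.optim.AdamW(model.parameters(), lr=1.0, eps=1e-15)
#   sched = torch.optim.lr_scheduler.LambdaLR(opt, lambda s: wsd_lr(s, T))
```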
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 80, + 731, + 175, + 743 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 731, + 175, + 743 + ], + "spans": [ + { + "bbox": [ + 80, + 731, + 175, + 743 + ], + "type": "text", + "content": "3Open LLM Leaderboard" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 743, + 290, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 743, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 67, + 743, + 290, + 773 + ], + "type": "text", + "content": "4Please visit https://huggingface.co/aisingapore for all artifacts in this paper, including training data and other versions of SEA-LION" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 157, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 157, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 157, + 84 + ], + "type": "text", + "content": "3 Post-training" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 100, + 180, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 100, + 180, + 113 + ], + "spans": [ + { + "bbox": [ + 68, + 100, + 180, + 113 + ], + "type": "text", + "content": "3.1 Post-training data" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 124, + 290, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 124, + 290, + 179 + ], + "spans": [ + { + "bbox": [ + 67, + 124, + 290, + 179 + ], + "type": "text", + "content": "The post-training data consists of 3 subsets of data for Stage 1 IFT, Stage 2 IFT, and the Preference dataset for alignment, respectively. We describe the training data information of each step as follows." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 181, + 290, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 181, + 290, + 290 + ], + "spans": [ + { + "bbox": [ + 67, + 181, + 290, + 290 + ], + "type": "text", + "content": "Stage 1 IFT. In this step, we employ Infinity-Instruct [Foundation and Chat] (Beijing Academy of Artificial Intelligence, 2024) and OpenMath-Instruct 2 (Toshniwal et al., 2024) to improve the mathematical, reasoning, and coding skills of the instruction model. The full details of the training data are shown in Appendix 7." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 293, + 290, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 293, + 290, + 428 + ], + "spans": [ + { + "bbox": [ + 67, + 293, + 290, + 428 + ], + "type": "text", + "content": "Stage 2 IFT. Then, in this step, we use generalized large-scale instructions on the previous instruction model. In particular, we employ 22 existing datasets (written in English, Thai, and Vietnamese) and formulate new 22 synthetic datasets using various models and techniques to create SEA instruction datasets (see Appendix A.3 for the full data generation details). As shown in Appendix 9, we use a total of 7,298,828 instruction samples that cover 11 languages." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 432, + 290, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 432, + 290, + 526 + ], + "spans": [ + { + "bbox": [ + 67, + 432, + 290, + 526 + ], + "type": "text", + "content": "Helpfulness and preference alignment. 
We also conduct an alignment learning on top of the instruction model using a feedback dataset called UltraFeedback (Cui et al., 2024). In addition, we also synthesized the SEA version of the UltraFeedback using NemoTron-70b with Gemma2 as a reward model, see Appendix A.4 for the full details." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 69, + 542, + 286, + 695 + ], + "blocks": [ + { + "bbox": [ + 69, + 542, + 286, + 695 + ], + "lines": [ + { + "bbox": [ + 69, + 542, + 286, + 695 + ], + "spans": [ + { + "bbox": [ + 69, + 542, + 286, + 695 + ], + "type": "image", + "image_path": "9138939f8b29aa04aaea835f7a6d240c1c7202541e014a22e29a7fadfe0d65fb.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 703, + 291, + 765 + ], + "lines": [ + { + "bbox": [ + 67, + 703, + 291, + 765 + ], + "spans": [ + { + "bbox": [ + 67, + 703, + 291, + 765 + ], + "type": "text", + "content": "Figure 1: Training process of Llama-SEA-LION-8B-IT (Section 3.2.1). The post-training process consists of 2 stages of instruction fine-tuning, an alignment stage and multiple merge stages. Dotted lines denote a merge stage and solid lines denote an alignment stage." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 303, + 71, + 429, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 71, + 429, + 84 + ], + "spans": [ + { + "bbox": [ + 303, + 71, + 429, + 84 + ], + "type": "text", + "content": "3.2 Post-training process" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 89, + 525, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 89, + 525, + 238 + ], + "spans": [ + { + "bbox": [ + 302, + 89, + 525, + 238 + ], + "type": "text", + "content": "We use LLaMaFactory (Zheng et al., 2024b) with DeepSpeed (Rasley et al., 2020) for all Instruction Fine Tuning (IFT) and alignment steps. All IFT stages are performed using full model finetuning, where the models are from the previous step (Section 2.2) and existing models. We use MergeKit (Goddard et al., 2024) with a value of 1 for weight and density parameters for all merge steps. Models selected for merging are selected empirically, based on the openness of model licenses, the suitability for merging and performance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 244, + 458, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 244, + 458, + 257 + ], + "spans": [ + { + "bbox": [ + 302, + 244, + 458, + 257 + ], + "type": "text", + "content": "3.2.1 Llama-SEA-LION-8B-IT" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 260, + 525, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 260, + 525, + 394 + ], + "spans": [ + { + "bbox": [ + 302, + 260, + 525, + 394 + ], + "type": "text", + "content": "Stage 1 IFT As shown in Figure 1, we started off the post-training phase with IFT of Llama-SEA-LION-8B with the Infinity Instruct (Foundation) (Beijing Academy of Artificial Intelligence, 2024) and OpenMathInstruct2 (Toshniwal et al., 2024) datasets. Both datasets contain approximately 9.5 million instruction pairs, primarily in English and centered around reasoning, math, and code. We refer to the model at this stage as Stage1-Llama." 
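The paper runs all IFT stages as full-parameter fine-tuning with LLaMaFactory and DeepSpeed. As a rough, framework-agnostic stand-in, a Stage-1-style run with TRL's SFTTrainer might look like the sketch below; the model id, toy data, and hyperparameters are placeholders, not the authors' configuration:

```python
# Rough stand-in for a Stage-1-style full-parameter instruction fine-tune.
# The paper uses LLaMaFactory + DeepSpeed; this sketch uses TRL's SFTTrainer,
# and the model id, data, and hyperparameters below are placeholders.
from datasets import Dataset
from trl import SFTConfig, SFTTrainer

# Toy conversational data; the real Stage 1 uses Infinity-Instruct (Foundation)
# and OpenMathInstruct-2 (~9.5M math/code/reasoning instruction pairs).
data = Dataset.from_list([
    {"messages": [
        {"role": "user", "content": "What is 12 * 7?"},
        {"role": "assistant", "content": "12 * 7 = 84."},
    ]},
])

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B-Instruct",  # small placeholder, not the authors' model
    train_dataset=data,
    args=SFTConfig(
        output_dir="stage1-sft",
        per_device_train_batch_size=1,
        num_train_epochs=1,
        learning_rate=1e-5,
    ),
)
trainer.train()
```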
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 396, + 525, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 396, + 525, + 557 + ], + "spans": [ + { + "bbox": [ + 302, + 396, + 525, + 557 + ], + "type": "text", + "content": "Stage 2 IFT We performed a second round of IFT using the SEA-Instruct dataset, which consists of approximately 7.3 million instruction pairs, of which 5 million instruction pairs are generated using the Gemma-2-27B-Instruct (Rivière et al., 2024) model and the Qwen2.5-32B-Instruct model (Yang et al., 2024a) in SEA languages. The remainder are English-language instruction pairs from the Infinity-Instruct (Chat) (Beijing Academy of Artificial Intelligence, 2024) dataset. We refer to the model at this stage as Stage-2-Llama." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 558, + 525, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 558, + 525, + 640 + ], + "spans": [ + { + "bbox": [ + 302, + 558, + 525, + 640 + ], + "type": "text", + "content": "First merge After finishing the IFT stages, we performed the first of a series of merges by merging Stage-1-Llama and Stage-2-Llama into Llama-SEA-LION-8B using the DARE TIES (Yu et al., 2024; Ilharco et al., 2023) method. We refer to the model at this stage as Merge-1-Llama." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 640, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 525, + 775 + ], + "type": "text", + "content": "Second merge In order to mitigate catastrophic forgetting due to the fine-tuning process (Alexandrov et al., 2024), we performed the second round of merging by merging top-performing instruction-tuned models that share the Llama 3.1 lineage. We merge the original Llama-3.1-8B-Instruct, Llama3-8B-SEA-LION-v2.1-Instruct (SEA-LION Team, 2024), and SuperNova-Lite (Arcee-AI, 2024) into Merge-1-Llama using the Consensus TA (Wang et al., 2024b; Ilharco et al., 2023) merge method." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 97 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 97 + ], + "type": "text", + "content": "We refer to the model at this stage as Merge-2-Llama." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 99, + 291, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 99, + 291, + 167 + ], + "spans": [ + { + "bbox": [ + 67, + 99, + 291, + 167 + ], + "type": "text", + "content": "Helpfulness and preference alignment We performed one round of alignment on Merge-2-Llama using SimPO (Meng et al., 2024) with the SEA-Preference dataset. We refer to the model at this stage as Aligned-SimPO-Llama." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 168, + 292, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 168, + 292, + 248 + ], + "spans": [ + { + "bbox": [ + 67, + 168, + 292, + 248 + ], + "type": "text", + "content": "Final merge Lastly, we performed a merge using the DELLA-Linear merge method. With the original Llama-3.1-8B-Instruct model as the base for merging, we merge in Merge-2-Llama and Aligned-SimPO-Llama to produce the final model, Llama-SEA-LION-8B-IT."
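The merge steps in this subsection are the kind of operation MergeKit drives from a YAML config via its mergekit-yaml entry point. Below is a sketch of what the first (DARE TIES) merge could look like; the local paths and dtype are assumptions, while the weight/density values of 1 follow Section 3.2:

```python
# Sketch of the "First merge": DARE TIES of the Stage 1 and Stage 2 models
# into the CPT base using MergeKit. Paths and dtype are assumptions; the
# weight/density of 1.0 follow the paper's stated MergeKit settings.
import subprocess
import textwrap

config = textwrap.dedent("""\
    merge_method: dare_ties
    base_model: ./llama-sea-lion-8b            # CPT model (assumed local path)
    models:
      - model: ./stage-1-llama
        parameters: {weight: 1.0, density: 1.0}
      - model: ./stage-2-llama
        parameters: {weight: 1.0, density: 1.0}
    dtype: bfloat16
""")

with open("merge1.yml", "w") as f:
    f.write(config)

# mergekit-yaml is MergeKit's standard CLI entry point.
subprocess.run(["mergekit-yaml", "merge1.yml", "./merge-1-llama"], check=True)
```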
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 274, + 229, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 274, + 229, + 285 + ], + "spans": [ + { + "bbox": [ + 67, + 274, + 229, + 285 + ], + "type": "text", + "content": "3.2.2 Gemma-SEA-LION-9B-IT" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 70, + 299, + 289, + 453 + ], + "blocks": [ + { + "bbox": [ + 70, + 299, + 289, + 453 + ], + "lines": [ + { + "bbox": [ + 70, + 299, + 289, + 453 + ], + "spans": [ + { + "bbox": [ + 70, + 299, + 289, + 453 + ], + "type": "image", + "image_path": "4c339bc0c829686d5690eeb36036737c9fc9fc6bbb2f0e5a2fc19dd66e119f36.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 461, + 291, + 522 + ], + "lines": [ + { + "bbox": [ + 67, + 461, + 291, + 522 + ], + "spans": [ + { + "bbox": [ + 67, + 461, + 291, + 522 + ], + "type": "text", + "content": "Figure 2: Training process of Gemma-SEA-LION-9B-IT (Section 3.2.2). The post-training process comprises two stages of instruction fine-tuning, an alignment stage, and multiple merge stages. Dotted lines denote a merge stage and solid lines denote an alignment stage." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 531, + 291, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 531, + 291, + 612 + ], + "spans": [ + { + "bbox": [ + 67, + 531, + 291, + 612 + ], + "type": "text", + "content": "Stage 1 and Stage 2 IFT Similar to the Llama-SEA-LION-8B-IT, we started off the post-training phase with both stages of IFT using the same datasets on the Gemma-2-9B model (Rivière et al., 2024). We refer to both models at stage 1 and stage 2 as Stage-1-Gemma and Stage-2-Gemma, respectively." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 613, + 291, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 291, + 666 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 291, + 666 + ], + "type": "text", + "content": "First merge We merge the Gemma-2-9B-IT (Rivière et al., 2024) and Stage-2-Gemma into Gemma-2-9B using the DELLA Linear method. We refer to the model at this stage as the Merge-1-Gemma." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 667, + 291, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 667, + 291, + 734 + ], + "spans": [ + { + "bbox": [ + 67, + 667, + 291, + 734 + ], + "type": "text", + "content": "Helpfulness and preference alignment Using the Merge-1-Gemma as the base model, we performed one round of alignment using SimPO with the SEA-Preference dataset. We refer to the model at this stage as the Aligned-SimPO-Gemma." 
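SimPO's objective is compact enough to state directly: a length-normalized implicit reward plus a target margin, with no reference model. A minimal sketch on per-sequence log-probabilities follows; the beta and gamma values are illustrative, as the text does not report them:

```python
# Minimal sketch of the SimPO objective (Meng et al., 2024) used in the
# alignment rounds: length-normalized log-prob rewards, a margin gamma, and
# no reference model. beta/gamma here are illustrative defaults.
import torch
import torch.nn.functional as F

def simpo_loss(logp_chosen, len_chosen, logp_rejected, len_rejected,
               beta: float = 2.0, gamma: float = 1.0) -> torch.Tensor:
    """logp_* are summed token log-probs per sequence; len_* are token counts."""
    r_chosen = beta * logp_chosen / len_chosen        # average-log-prob reward
    r_rejected = beta * logp_rejected / len_rejected
    return -F.logsigmoid(r_chosen - r_rejected - gamma).mean()

# A batch of two preference pairs (values are made up):
loss = simpo_loss(
    logp_chosen=torch.tensor([-40.0, -55.0]), len_chosen=torch.tensor([20.0, 25.0]),
    logp_rejected=torch.tensor([-80.0, -90.0]), len_rejected=torch.tensor([22.0, 30.0]),
)
```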
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "content": "Final merge Finally, using the Gemma-2-9B model as the base model, we merged Merge-1-Gemma, FuseChat Gemma-2-9B-Instruct (Yang" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 71, + 526, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 111 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 111 + ], + "type": "text", + "content": "et al., 2024b), Gemma-SEA-LION-9B, and Aligned-SimPO-Gemma into it to produce the final model Gemma-SEA-LION-9B-IT." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 123, + 380, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 123, + 380, + 135 + ], + "spans": [ + { + "bbox": [ + 302, + 123, + 380, + 135 + ], + "type": "text", + "content": "3.3 Discussion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 142, + 526, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 142, + 526, + 237 + ], + "spans": [ + { + "bbox": [ + 302, + 142, + 526, + 237 + ], + "type": "text", + "content": "This post-training workflow emphasizes the careful balance between general capabilities, SEA-specific linguistic fluency, and natural conversational abilities. Each step in the workflow is designed to progressively refine the model, ensuring it meets the diverse needs of users in the Southeast Asian region." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 238, + 526, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 238, + 526, + 333 + ], + "spans": [ + { + "bbox": [ + 302, + 238, + 526, + 333 + ], + "type": "text", + "content": "The entire post-training process for Gemma-SEA-LION-9B-IT and Llama-SEA-LION-8B-IT took approximately 1350 and 1024 GPU hours, respectively, on eight H100 GPUs. To make the training efficient, all post-training steps utilize Liger Kernel (Hsu et al., 2024) for substantial memory savings of approximately " + }, + { + "bbox": [ + 302, + 238, + 526, + 333 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 302, + 238, + 526, + 333 + ], + "type": "text", + "content": "." 
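Liger Kernel is enabled with one patch call per model family before the model is instantiated. A sketch, assuming liger-kernel's transformers integration (whether the authors patched exactly these model classes is not stated):

```python
# Sketch of enabling Liger Kernel's fused Triton kernels, which the paper
# credits with roughly 60% memory savings. The patch functions come from
# liger-kernel's transformers integration; the exact kernels the authors
# enabled are an assumption.
from liger_kernel.transformers import (
    apply_liger_kernel_to_gemma2,
    apply_liger_kernel_to_llama,
)
from transformers import AutoModelForCausalLM

apply_liger_kernel_to_llama()   # patches Llama's RMSNorm, RoPE, MLP, CE loss
apply_liger_kernel_to_gemma2()  # same idea for the Gemma 2 family

# Models instantiated after patching pick up the fused kernels automatically.
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
```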
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 344, + 427, + 359 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 344, + 427, + 359 + ], + "spans": [ + { + "bbox": [ + 302, + 344, + 427, + 359 + ], + "type": "text", + "content": "4 Experimental Setup" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 368, + 430, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 368, + 430, + 381 + ], + "spans": [ + { + "bbox": [ + 302, + 368, + 430, + 381 + ], + "type": "text", + "content": "4.1 Competitive methods" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 386, + 526, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 386, + 526, + 495 + ], + "spans": [ + { + "bbox": [ + 302, + 386, + 526, + 495 + ], + "type": "text", + "content": "For the evaluation, we compared our models against well-known LLMs for multilingual and SEA languages, such as SeaLLMs-v3 (Zhang et al., 2024a), Sailor2 (Team, 2024), Qwen 2.5 (Yang et al., 2024a), Gemma 2 (Rivière et al., 2024) and Llama 3.1 (Dubey et al., 2024), all of which have fewer than 10 billion parameters, similar to our models." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 507, + 443, + 518 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 507, + 443, + 518 + ], + "spans": [ + { + "bbox": [ + 302, + 507, + 443, + 518 + ], + "type": "text", + "content": "4.2 Evaluation Benchmarks" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 526, + 525, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 526, + 525, + 565 + ], + "spans": [ + { + "bbox": [ + 302, + 526, + 525, + 565 + ], + "type": "text", + "content": "To evaluate the robustness of our proposed models, we compare them to competitors on three benchmarks." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 567, + 526, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 567, + 526, + 743 + ], + "spans": [ + { + "bbox": [ + 302, + 567, + 526, + 743 + ], + "type": "text", + "content": "SEA Benchmarks. We evaluated the multilingual performance of each LLM using the SEA-HELM Leaderboard (Leong et al., 2023; Susanto et al., 2025) " + }, + { + "bbox": [ + 302, + 567, + 526, + 743 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 302, + 567, + 526, + 743 + ], + "type": "text", + "content": ". We selected SEA-HELM because, among existing benchmarks (DAMO-NLP-SG, 2024; Lovenia et al., 2024; Wang et al., 2024a), its design best reflects SEA culture and knowledge. We also evaluate on SEACrowd (Lovenia et al., 2024), a benchmark with wide coverage of SEA languages. It comprises natural language understanding and generation datasets across all SEA languages." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 752, + 525, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 752, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 752, + 525, + 775 + ], + "type": "text", + "content": "5Please visit https://leaderboard.sea-lion.ai/ for live score updates of SEA-LION."
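The English side of this evaluation can be approximated with EleutherAI's lm-evaluation-harness, which backs the Open LLM Leaderboard. A sketch follows; the task names use the harness's leaderboard variants, and the model id is a placeholder rather than a confirmed release name:

```python
# Sketch of an Open LLM Leaderboard-style English evaluation using
# lm-evaluation-harness. Task names are the harness's "leaderboard_*" variants
# and the model id is a placeholder, not a confirmed checkpoint name.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=aisingapore/Gemma-SEA-LION-9B-IT,dtype=bfloat16",
    tasks=["leaderboard_ifeval", "leaderboard_bbh", "leaderboard_musr"],
    batch_size=8,
)
print(results["results"])  # per-task scores, averaged for the leaderboard
```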
+ } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 84, + 522, + 217 + ], + "blocks": [ + { + "bbox": [ + 264, + 70, + 328, + 82 + ], + "lines": [ + { + "bbox": [ + 264, + 70, + 328, + 82 + ], + "spans": [ + { + "bbox": [ + 264, + 70, + 328, + 82 + ], + "type": "text", + "content": "SEA-HELM" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 84, + 522, + 217 + ], + "lines": [ + { + "bbox": [ + 71, + 84, + 522, + 217 + ], + "spans": [ + { + "bbox": [ + 71, + 84, + 522, + 217 + ], + "type": "table", + "html": "
Models | Average | NLU, NLG, NLR, NLI: ID | VI | TH | TA | Instruction Following: ID | VI | TH
Meta-Llama-3.1-8B | 35.37 | 42.33 | 40.67 | 35.13 | 38.88 | 16.19 | 19.05 | 9.00
SeaLLMs-v3-7B | 37.04 | 44.79 | 48.29 | 43.53 | 27.45 | 26.67 | 35.24 | 26.00
Gemma-2-9B | 41.48 | 47.65 | 43.28 | 42.00 | 53.26 | 4.76 | 3.81 | 10.00
Qwen2.5-7B | 41.98 | 51.63 | 52.17 | 46.55 | 36.60 | 31.43 | 36.19 | 30.00
Sailor2-8B | 42.62 | 53.23 | 47.33 | 46.64 | 45.04 | 30.48 | 30.48 | 35.00
Llama-SEA-LION-8B | 41.42 | 44.98 | 46.25 | 42.79 | 43.03 | 25.71 | 32.38 | 23.00
Gemma-SEA-LION-9B | 48.67 | 57.16 | 49.39 | 47.16 | 60.56 | 25.71 | 20.00 | 27.00
", + "image_path": "e406902fdae58a202c5ead85c91057043dad6359c378e31f2bf590aaf4e7548a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 71, + 272, + 522, + 372 + ], + "blocks": [ + { + "bbox": [ + 73, + 223, + 521, + 249 + ], + "lines": [ + { + "bbox": [ + 73, + 223, + 521, + 249 + ], + "spans": [ + { + "bbox": [ + 73, + 223, + 521, + 249 + ], + "type": "text", + "content": "Table 1: SEA-HELM multilingual benchmark on NLU, NLG, NLR, NLI and instruction following on base and continued pre-trained models of similar sizes." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 243, + 261, + 350, + 272 + ], + "lines": [ + { + "bbox": [ + 243, + 261, + 350, + 272 + ], + "spans": [ + { + "bbox": [ + 243, + 261, + 350, + 272 + ], + "type": "text", + "content": "Open LLM Leaderboard" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 272, + 522, + 372 + ], + "lines": [ + { + "bbox": [ + 71, + 272, + 522, + 372 + ], + "spans": [ + { + "bbox": [ + 71, + 272, + 522, + 372 + ], + "type": "table", + "html": "
Models | Average | MMLU-PRO | BBH | GPQA | MATH Lvl 5 | IFEval (EN) | MUSR
Meta-Llama-3.1-8B | 13.9 | 24.95 | 25.29 | 6.32 | 5.14 | 12.7 | 8.98
Sailor2-8B | 17.71 | 25.74 | 27.62 | 4.87 | 7.02 | 21.95 | 19.03
Gemma-2-9B | 21.15 | 34.48 | 34.1 | 10.51 | 13.14 | 20.4 | 14.3
SeaLLMs-v3-7B | 24.00 | 35.71 | 34.57 | 9.28 | 18.81 | 32.94 | 12.68
Qwen2.5-7B | 24.99 | 37.39 | 35.81 | 9.96 | 18.88 | 33.74 | 14.14
Llama-SEA-LION-8B | 16.61 | 27.6 | 26.04 | 7.49 | 9.89 | 16.56 | 12.07
Gemma-SEA-LION-9B | 22.41 | 32.78 | 37.24 | 10.29 | 9.89 | 30.12 | 14.11
", + "image_path": "4867daa85c3c9dd6e65602d241de43dd14c0427a828b2219e89a5413008cb6c3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 380, + 513, + 393 + ], + "lines": [ + { + "bbox": [ + 77, + 380, + 513, + 393 + ], + "spans": [ + { + "bbox": [ + 77, + 380, + 513, + 393 + ], + "type": "text", + "content": "Table 2: Open LLM Leaderboard benchmarks across different continued pre-trained models of similar sizes." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 415, + 291, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 415, + 291, + 481 + ], + "spans": [ + { + "bbox": [ + 67, + 415, + 291, + 481 + ], + "type": "text", + "content": "However, due to maintenance reasons, we cannot reproduce the NLG benchmark of SEACrowd. Therefore, we experiment only with the NLU benchmark (zero-shot), which has 131 data subsets, 7 tasks, and 31 SEA indigenous languages." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 482, + 291, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 482, + 291, + 686 + ], + "spans": [ + { + "bbox": [ + 67, + 482, + 291, + 686 + ], + "type": "text", + "content": "English performance. We also evaluated the English performance of the models using the Open LLM Leaderboard (HuggingFace, 2024). This is because English is also widely used in SEA countries. Therefore, we need to evaluate the understanding and knowledge of LLMs in the English benchmark as well. The leaderboard consists of six benchmarks, IFEval (Zhou et al., 2023), Big Bench Hard (Suzgun et al., 2023), MATH (Hendrycks et al., 2021), GPQA (Rein et al., 2023), MuSR (Sprague et al., 2024) and MMLUPRO (Wang et al., 2024c). Moreover, we also evaluate the CPT models on SEA-HELM and the Open LLM Leaderboard since these benchmarks support the CPT evaluation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 698, + 199, + 711 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 698, + 199, + 711 + ], + "spans": [ + { + "bbox": [ + 67, + 698, + 199, + 711 + ], + "type": "text", + "content": "5 Experimental Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 720, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 720, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 720, + 291, + 775 + ], + "type": "text", + "content": "To understand the robustness and generalization of our proposed models, we conduct three studies as follows. Section 5.1 evaluates the robustness of continual pre-training models using SEA-HELM" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 414, + 525, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 414, + 525, + 481 + ], + "spans": [ + { + "bbox": [ + 302, + 414, + 525, + 481 + ], + "type": "text", + "content": "and the Open LLM leaderboard. In Section 5.2, we compare our instruction fine-tuning models with competitors in three benchmarks to demonstrate the generalization of our models. Lastly, we discuss the design choice of our models in Section 5.3." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 497, + 480, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 497, + 480, + 510 + ], + "spans": [ + { + "bbox": [ + 302, + 497, + 480, + 510 + ], + "type": "text", + "content": "5.1 Continued Pre-Training Results" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 301, + 518, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 518, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 301, + 518, + 526, + 775 + ], + "type": "text", + "content": "SEA performance. The CPT stage is primarily focused on gaining SEA language capabilities and knowledge. For the purpose of comparison against base and CPT models, as shown in Table 1, we observed a 6.05 and 7.19 average SEA-HELM performance increase over the Meta-Llama-3.1-8B and Gemma-2-9B for Llama-SEA-LION-8B and Gemma-SEA-LION-9B, respectively. We observed a much larger average increase with instruction following capabilities in particular, which we attribute to the fact that our CPT models are trained from the instruction models rather than from the base models. Moreover, in the average performance, we found that our Gemma-SEA-LION-9B models perform the best compared to other models. This emphasizes a strong reason to perform CPT for improving the performance of SEA languages, rather than skipping the CPT and performing SFT directly." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 80, + 523, + 284 + ], + "blocks": [ + { + "bbox": [ + 271, + 70, + 322, + 79 + ], + "lines": [ + { + "bbox": [ + 271, + 70, + 322, + 79 + ], + "spans": [ + { + "bbox": [ + 271, + 70, + 322, + 79 + ], + "type": "text", + "content": "SEA-HELM" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 80, + 523, + 284 + ], + "lines": [ + { + "bbox": [ + 71, + 80, + 523, + 284 + ], + "spans": [ + { + "bbox": [ + 71, + 80, + 523, + 284 + ], + "type": "table", + "html": "
Models | Average | NLU, NLG, NLR, NLI: ID | VI | TH | TA | Instruction Following: ID | VI | TH | MTBench: ID | VI | TH
SeaLLMs-v3-7B-Chat | 39.19 | 42.72 | 48.50 | 42.59 | 12.06 | 57.14 | 53.33 | 47.00 | 59.81 | 65.24 | 56.59
Llama-3.1-8B-Instruct | 41.48 | 51.50 | 51.31 | 45.32 | 15.40 | 77.14 | 75.24 | 63.00 | 56.38 | 57.59 | 54.34
Sailor2-8B-Chat | 43.13 | 48.98 | 48.01 | 45.44 | 28.29 | 49.52 | 45.71 | 40.00 | 69.76 | 66.97 | 73.94
Qwen2.5-7B-Instruct | 44.58 | 60.28 | 53.46 | 53.43 | 21.03 | 81.90 | 69.52 | 66.00 | 65.66 | 66.80 | 68.71
Gemma-2-9B-IT | 55.33 | 64.04 | 59.86 | 57.22 | 52.28 | 88.57 | 78.10 | 71.00 | 68.78 | 68.37 | 73.51
Stage-1-Llama | 50.76 | 51.84 | 51.83 | 46.23 | 27.53 | 69.52 | 73.33 | 59.00 | 42.74 | 46.41 | 46.46
Stage-2-Llama | 59.49 | 53.87 | 55.18 | 50.92 | 44.80 | 77.14 | 76.19 | 67.00 | 50.90 | 53.72 | 46.97
Merge-1-Llama | 59.36 | 56.73 | 56.82 | 51.71 | 46.63 | 81.90 | 82.86 | 67.00 | 57.04 | 54.01 | 50.28
Merge-2-Llama | 58.01 | 59.19 | 52.63 | 51.89 | 35.40 | 87.62 | 80.95 | 78.00 | 56.38 | 59.32 | 58.86
Aligned-SimPO-Llama | 51.30 | 54.86 | 51.69 | 46.77 | 26.40 | 82.86 | 80.00 | 68.00 | 68.20 | 64.68 | 64.92
Llama-SEA-LION-8B-IT | 61.84 | 60.50 | 61.48 | 55.92 | 43.61 | 84.76 | 85.71 | 76.00 | 62.65 | 68.32 | 65.13
Stage-1-Gemma | 56.56 | 55.06 | 54.51 | 51.96 | 42.74 | 66.67 | 74.29 | 61.00 | 47.35 | 47.26 | 55.05
Stage-2-Gemma | 66.66 | 64.10 | 61.76 | 56.90 | 57.85 | 89.52 | 82.86 | 76.00 | 60.54 | 58.93 | 58.76
Merge-1-Gemma | 69.26 | 66.25 | 64.95 | 59.74 | 60.41 | 89.52 | 91.43 | 82.00 | 66.45 | 64.47 | 65.00
Aligned-SimPO-Gemma | 69.37 | 65.69 | 65.47 | 59.51 | 57.38 | 86.67 | 88.57 | 78.00 | 68.89 | 73.67 | 73.51
Gemma-SEA-LION-9B-IT | 69.35 | 66.26 | 64.93 | 59.23 | 58.82 | 94.29 | 88.57 | 78.00 | 65.85 | 73.27 | 69.07
", + "image_path": "0f75f24e6df40fe78b1dd64776ad1d19b8df275d0fba107911bc92e1b8175379.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 72, + 329, + 409, + 455 + ], + "blocks": [ + { + "bbox": [ + 72, + 329, + 409, + 455 + ], + "lines": [ + { + "bbox": [ + 72, + 329, + 409, + 455 + ], + "spans": [ + { + "bbox": [ + 72, + 329, + 409, + 455 + ], + "type": "image", + "image_path": "95807f122e753274efc4c43da49e9d40d32d3ff051642f5e33ffc65fc2dd8d5a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 84, + 466, + 394, + 480 + ], + "lines": [ + { + "bbox": [ + 84, + 466, + 394, + 480 + ], + "spans": [ + { + "bbox": [ + 84, + 466, + 394, + 480 + ], + "type": "text", + "content": "Figure 3: Zero-shot model performance across NLU tasks in SEA languages." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 418, + 349, + 523, + 429 + ], + "blocks": [ + { + "bbox": [ + 68, + 292, + 524, + 316 + ], + "lines": [ + { + "bbox": [ + 68, + 292, + 524, + 316 + ], + "spans": [ + { + "bbox": [ + 68, + 292, + 524, + 316 + ], + "type": "text", + "content": "Table 3: SEA-HELM multilingual benchmark on NLU, NLG, NLR, NLI, instruction following and multi-turn chat on instruct models of similar sizes." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 418, + 349, + 523, + 429 + ], + "lines": [ + { + "bbox": [ + 418, + 349, + 523, + 429 + ], + "spans": [ + { + "bbox": [ + 418, + 349, + 523, + 429 + ], + "type": "table", + "html": "
Model | NLU Score
SeaLLMs-v3-7B-chat | 52.68
Llama-3.1-8B-Instruct | 49.94
Sailor2-8B-Chat | 60.21
Qwen2.5-7B-Instruct | 54.51
Gemma-2-9B-IT | 60.21
Llama-SEA-LION-8B-IT | 55.10
Gemma-SEA-LION-9B-IT | 64.13
", + "image_path": "4e16748c2d5d2a0090d54b396c70b170858f6680defc5b52a25cfe87b65aacd2.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 417, + 437, + 525, + 485 + ], + "lines": [ + { + "bbox": [ + 417, + 437, + 525, + 485 + ], + "spans": [ + { + "bbox": [ + 417, + 437, + 525, + 485 + ], + "type": "text", + "content": "Table 4: The average NLU performance across 131 data subsets and 31 indigenous languages." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 501, + 291, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 501, + 291, + 675 + ], + "spans": [ + { + "bbox": [ + 67, + 501, + 291, + 675 + ], + "type": "text", + "content": "English performance. For the English performance, as shown in Table 2, both CPT models also managed to perform competitively against the Meta-Llama-3.1-8B and Gemma-2-9B base models on the Open LLM Leaderboard benchmarks. This indicates that our choice of retraining with a proportion of " + }, + { + "bbox": [ + 67, + 501, + 291, + 675 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 67, + 501, + 291, + 675 + ], + "type": "text", + "content": " English tokens has been beneficial in mitigating catastrophic forgetting, which has been shown to stem from CPT (Zheng et al., 2024a). Although our CPT models perform lower than Qwen and SeaLLMs on this benchmark, we outperform them on the SEA language instead, which is the main focus of this work." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 687, + 242, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 687, + 242, + 700 + ], + "spans": [ + { + "bbox": [ + 67, + 687, + 242, + 700 + ], + "type": "text", + "content": "5.2 Instruction Fine-tuning Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 707, + 290, + 745 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 707, + 290, + 745 + ], + "spans": [ + { + "bbox": [ + 67, + 707, + 290, + 745 + ], + "type": "text", + "content": "In this study, we compare our models with competitors on SEA-HELM, SEACrowd, and the Open LLM Leaderboard as follows." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": "SEA-HELM. As shown in Table 3, the SEA-HELM benchmark performance demonstrates that" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 500, + 526, + 757 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 500, + 526, + 757 + ], + "spans": [ + { + "bbox": [ + 302, + 500, + 526, + 757 + ], + "type": "text", + "content": "our instruct models, Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, attain competitive performance in SEA languages, with Gemma-SEA-LION-9B-IT achieving one of the highest average performances. Moreover, we significantly improve the performance of Llama-3.1-8B-Instruct from 41.48 to 61.84 using Llama-SEA-LION-8B-IT, while Gemma-SEA-LION-9B-IT achieves 14.02 improvement points compared to Gemma-2-9B-IT. Both Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT outperform other SEA languages-focused LLMs, such as *Sailor2-8B-Chat* and *SEALLMs-v3-7B-Chat*, with an average score of 69.35 across all the languages covered by the SEAHELM benchmark, apart from the SEA-MTbench tasks. 
This conforms with the previous results on the CPT models (Section 5.1) that our CPT model performs the best on SEA languages, resulting in the best performer in this experiment." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 761, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 761, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 761, + 525, + 775 + ], + "type": "text", + "content": "SEACrowd. Other than evaluating on some SEA" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 80, + 522, + 284 + ], + "blocks": [ + { + "bbox": [ + 245, + 70, + 348, + 80 + ], + "lines": [ + { + "bbox": [ + 245, + 70, + 348, + 80 + ], + "spans": [ + { + "bbox": [ + 245, + 70, + 348, + 80 + ], + "type": "text", + "content": "Open LLM Leaderboard" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 80, + 522, + 284 + ], + "lines": [ + { + "bbox": [ + 71, + 80, + 522, + 284 + ], + "spans": [ + { + "bbox": [ + 71, + 80, + 522, + 284 + ], + "type": "table", + "html": "
Models | Average | MMLU-PRO | BBH | GPQA | MATH Lvl 5 | IFEval (EN) | MUSR
Sailor2-8B-Chat | 16.37 | 27.93 | 27.15 | 3.47 | 0.00 | 37.49 | 2.19
SeaLLMs-v3-7B-Chat | 22.49 | 33.93 | 24.37 | 7.27 | 15.86 | 44.10 | 9.38
Llama-3.1-8B-Instruct | 27.88 | 29.36 | 26.10 | 10.63 | 17.45 | 77.03 | 6.75
Qwen2.5-7B-Instruct | 27.93 | 37.00 | 34.72 | 10.18 | 0.00 | 76.34 | 9.34
Gemma-2-9B-IT | 28.86 | 31.95 | 42.14 | 14.77 | 0.23 | 74.36 | 9.74
Stage-1-Llama | 24.51 | 25.87 | 26.32 | 7.83 | 19.26 | 62.89 | 4.88
Stage-2-Llama | 27.75 | 28.10 | 24.64 | 7.72 | 19.56 | 78.78 | 7.74
Merge-1-Llama | 27.49 | 27.47 | 26.22 | 8.28 | 19.79 | 76.16 | 7.04
Merge-2-Llama | 29.96 | 29.92 | 28.78 | 9.96 | 19.94 | 82.61 | 8.54
Aligned-SimPO-Llama | 30.58 | 30.84 | 34.31 | 8.39 | 26.59 | 75.76 | 7.61
Llama-SEA-LION-8B-IT | 30.39 | 31.01 | 29.47 | 10.40 | 22.58 | 80.35 | 8.54
Stage-1-Gemma | 29.88 | 33.34 | 38.51 | 10.74 | 24.17 | 56.87 | 15.66
Stage-2-Gemma | 33.48 | 34.67 | 36.06 | 11.74 | 20.77 | 83.00 | 14.61
Merge-1-Gemma | 35.15 | 36.22 | 41.42 | 15.32 | 26.28 | 82.09 | 9.59
Aligned-SimPO-Gemma | 35.31 | 37.65 | 42.38 | 14.99 | 27.79 | 80.23 | 8.82
Gemma-SEA-LION-9B-IT | 35.43 | 36.94 | 43.39 | 15.10 | 24.24 | 81.85 | 11.07
", + "image_path": "78429de77740e6d1e87d54a49d5ac0a30a1623513b766b33f3be80741d5f7606.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 292, + 485, + 304 + ], + "lines": [ + { + "bbox": [ + 107, + 292, + 485, + 304 + ], + "spans": [ + { + "bbox": [ + 107, + 292, + 485, + 304 + ], + "type": "text", + "content": "Table 5: Open LLM Leaderboard benchmarks across different instruct models of similar sizes." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 326, + 290, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 326, + 290, + 596 + ], + "spans": [ + { + "bbox": [ + 69, + 326, + 290, + 596 + ], + "type": "text", + "content": "languages like SEA-HELM, we also evaluated our model compared to competitors on 31 SEA indigenous languages using SEACrowd-NLU. Note that, for this study, we use only the best settings of our models from the previous experiment (Table 3). As shown in Table 4, we observe a state-of-the-art result from Gemma-SEA-LION-9B-IT by achieving 64.13 points on the NLU benchmark, while Llama-SEA-LION-8B-IT improves its baseline from 49.94 to 55.10 points. Moreover, the results from Figure 3 also emphasize the robustness of our model by reaching more than 80 points on this benchmark, while SeaLLMs and Llama-3.1 have only a few cases where the performance exceeds 80 points. These results emphasize the robustness of our models by achieving the state-of-the-art with a model parameter less than 10B on SEA benchmarks, including both traditional classical NLP benchmark (SEACrowd-NLU) and modern LLM benchmark (SEA-HELM)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 599, + 290, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 599, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 599, + 290, + 774 + ], + "type": "text", + "content": "English performance. We also evaluate the performance of a widely used language, English, to observe a difference between the results of SEA and English. The Open LLM Leaderboard performance is shown in Table 5. Both Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT performed competitively in English language, math, and reasoning tasks, with Gemma-SEA-LION-9B-IT achieving the highest average score of 35.43. Moreover, we notice that the SEA models (Sailor and SeaLLMs) failed to perform on the English dataset. This might be because these models are optimized for SEA languages during supervised fine-tuning, and English" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 327, + 524, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 327, + 524, + 378 + ], + "spans": [ + { + "bbox": [ + 305, + 327, + 524, + 378 + ], + "type": "text", + "content": "performance decreased as a result. In contrast, our models balance the performance between SEA and English knowledge, resulting in a high score for all benchmarks." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 391, + 432, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 391, + 432, + 403 + ], + "spans": [ + { + "bbox": [ + 305, + 391, + 432, + 403 + ], + "type": "text", + "content": "5.3 Performance Analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 409, + 525, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 409, + 525, + 448 + ], + "spans": [ + { + "bbox": [ + 305, + 409, + 525, + 448 + ], + "type": "text", + "content": "In this study, we discuss the performance improvement in each design decision of our models (Tables 3 and 5) as follows." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 450, + 525, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 450, + 525, + 624 + ], + "spans": [ + { + "bbox": [ + 305, + 450, + 525, + 624 + ], + "type": "text", + "content": "Stage 1: English instruction fine tuning In Stage 1 IFT, the focus is predominantly on gaining general capabilities in math, code and general instruction following in the English language. Although our CPT models are based off of the instruct versions of Llama-3.1-8B, the CPT process has eroded the instruction following capabilities (See Table 5). We observe an increase of 3.86 and 9.72 for Stage-1-Llama and Stage-1-Gemma respectively in English instruction following capabilities on the IFEval benchmark. We also observe an average increase of 7.9 for Stage-1-Llama and 7.47 for Stage-1-Gemma for the SEA-HELM benchmark." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 626, + 525, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 626, + 525, + 732 + ], + "spans": [ + { + "bbox": [ + 305, + 626, + 525, + 732 + ], + "type": "text", + "content": "Stage 2: Multilingual instruction fine tuning In Stage 2 IFT, the focus is on multilingual and reasoning capabilities. By instruction fine tuning on SEA languages and higher complexity English instruction pairs, the Stage 2 models saw an average increase of 8.73 for Stage-2-Llama and 10.1 for Stage-2-Gemma over Stage 1 models on the SEAHELM benchmark." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 735, + 525, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 735, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 305, + 735, + 525, + 774 + ], + "type": "text", + "content": "Merge 1: Combining Stage 1 and Stage 2 Despite the significant gains observed in Stage 1 and 2, we observed that the effects of catastrophic for" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 290, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 290, + 164 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 290, + 164 + ], + "type": "text", + "content": "getting from earlier stages could still be observed after Stage 2. In order to mitigate this, we merge Stage 1 and Stage 2 models into the CPT model, after which we we observed an average increase of 2.6 for Merge-1-Gemma. We also observed an increase across all SEA-HELM benchmark tasks for Merge-1-Llama." 
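A minimal sketch of the weight-space interpolation these merges rely on, assuming plain PyTorch checkpoints. The paper's merges are actually performed with mergekit (Goddard et al., 2024); the checkpoint paths and mixing weights below are hypothetical, not the authors' recipe:

```python
# Minimal sketch of weight-space model merging, assuming plain PyTorch
# checkpoints. The paper's merges use mergekit (Goddard et al., 2024);
# the paths and mixing weights here are hypothetical.
import torch

def linear_merge(state_dicts, weights):
    """Parameter-wise weighted average of several model state dicts."""
    total = sum(weights)
    return {
        name: sum(w * sd[name].float() for sd, w in zip(state_dicts, weights)) / total
        for name in state_dicts[0]
    }

# Fold the Stage 1 and Stage 2 models back into the CPT model so that
# capabilities eroded by later stages are partially recovered.
cpt, stage1, stage2 = (torch.load(p, map_location="cpu")
                       for p in ("cpt.pt", "stage1.pt", "stage2.pt"))
merge1 = linear_merge([cpt, stage1, stage2], weights=[0.4, 0.3, 0.3])
torch.save(merge1, "merge1.pt")
```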
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 166, + 291, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 166, + 291, + 327 + ], + "spans": [ + { + "bbox": [ + 69, + 166, + 291, + 327 + ], + "type": "text", + "content": "Merge 2: Incorporating instruct models To reintroduce helpfulness, relevance and informativeness of responses observed in Llama 3.1 and Gemma 2 models, we perform further merges of open-source instruct models. While we observed significant increases in MT-Bench benchmark scores for Vietnamese and Thai, we also observed a slight degradation of average SEA-HELM performance as well as a slight degradation of Indonesian MT-Bench scores, which we view as acceptable trade-offs for the significant performance increases in Vietnamese and Thai." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 328, + 291, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 328, + 291, + 503 + ], + "spans": [ + { + "bbox": [ + 69, + 328, + 291, + 503 + ], + "type": "text", + "content": "Alignment steps In the alignment step to align the models to human preference, we prioritize the SEA MTBench performance over the other SEA-HELM benchmark tasks. We observed a broad increase in SEA MTBench performances across all languages for both models. However, this comes with minor degradation of instruction following capabilities and overall Indonesian SEA-HELM performance. The alignment step encourages longer, more helpful and sensitive responses but hurts performance on task-specific benchmarks and instruction following in some languages – an issue we address in the next step." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 504, + 291, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 504, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 504, + 291, + 774 + ], + "type": "text", + "content": "Final merge: Combining aligned models To compensate for the capability degradation in the previous steps, we merge Merge-2-Llama and Merge-1-Gemma with Aligned-SimPO-Llama and Aligned-SimPO-Gemma and various open sourced pretrained models describe in sections 3.2.1 and 3.2.2 for their respective model families. For Llama-SEA-LION-8B-IT, we observed a significant increase in average SEA-HELM performance (61.84) from the alignment stage (51.30), mainly from the increase in performance for the core tasks in SEA-HELM. This performance increase demonstrates the value of empirical selection of pre-trained models to be merged in based on each model's strengths and weaknesses to produce a far superior model. For Gemma-SEA-LION-9B-IT, it easily achieves higher performance compared to the Llama-SEA-LION-8B-IT with fewer post training steps. We attribute this performance to the high performance of the base Gemma 2 model and also to the larger vocab" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 303, + 71, + 524, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 71, + 524, + 98 + ], + "spans": [ + { + "bbox": [ + 303, + 71, + 524, + 98 + ], + "type": "text", + "content": "ulary size which have been demonstrated (Takase et al., 2024) to produce better models." 
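For reference, the Aligned-SimPO checkpoints folded into this final merge are tuned with SimPO (Meng et al., 2024), a reference-free preference objective that scores a response by its length-normalized average log-likelihood and enforces a target reward margin $\gamma$ between the chosen response $y_w$ and the rejected response $y_l$:

$$\mathcal{L}_{\text{SimPO}}(\pi_\theta) = -\,\mathbb{E}_{(x,\,y_w,\,y_l)\sim\mathcal{D}}\left[\log \sigma\!\left(\frac{\beta}{|y_w|}\log \pi_\theta(y_w \mid x) \;-\; \frac{\beta}{|y_l|}\log \pi_\theta(y_l \mid x) \;-\; \gamma\right)\right]$$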
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 303, + 110, + 400, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 110, + 400, + 121 + ], + "spans": [ + { + "bbox": [ + 303, + 110, + 400, + 121 + ], + "type": "text", + "content": "6 Related Works" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 303, + 132, + 526, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 132, + 526, + 401 + ], + "spans": [ + { + "bbox": [ + 303, + 132, + 526, + 401 + ], + "type": "text", + "content": "Recently, researchers have proposed large language models that support multilingual settings. Llama (Dubey et al., 2024) is the prior effort to release an open-source large language model for the research community to develop their own models. Then, Qwen (Yang et al., 2024a) and Gemma (Rivière et al., 2024) introduced open-source LLMs that perform comparably or better than Llama with a larger amount of training data and many supported languages for these recent models. Massively multilingual open-source models like Bloom (Scao et al., 2022) and Aya (Ustun et al., 2024) also support a very wide range of languages, including some SEA languages. Although these models demonstrate a robust performance in English benchmarks, they mostly underperformed on SEA benchmarks that tested for SEA languages, SEA knowledge and cultural understanding (Lovenia et al., 2024; Susanto et al., 2025), presumably due to a lack of language support for certain SEA languages or cultures." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 303, + 403, + 525, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 403, + 525, + 632 + ], + "spans": [ + { + "bbox": [ + 303, + 403, + 525, + 632 + ], + "type": "text", + "content": "In the SEA community, many works propose a large language model that is designed specifically for SEA languages by adding more SEA tokens in the training process, such as SeaLLMs (Nguyen et al., 2024) and Sailor (Sailor2 Team, 2024). However, the performance of these models is robust only on in-domain datasets or favors only some tasks (i.e., classical NLP datasets). This is because the design choice in the pre-training or fine-tuning of these models is not well studied, e.g., performing a single SFT step with low-quality datasets written in some SEA languages, resulting in a slight improvement on SEA benchmarks. To create a robust SEA LLM, we need to carefully balance language representation and design both pre-training and post-training (i.e., SFT, alignment, and model merging) for SEA contexts." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 645, + 381, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 645, + 381, + 657 + ], + "spans": [ + { + "bbox": [ + 303, + 645, + 381, + 657 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 667, + 526, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 667, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 303, + 667, + 526, + 774 + ], + "type": "text", + "content": "Despite the sizable population and language diversity in Southeast Asia, there remains a scarcity of resources and accurate linguistic and cultural representation with open-source LLMs. 
In this paper, we introduce Llama-SEA-LION-8B-IT and Gemma-SEA-LION-9B-IT, two multilingual LLMs comprehensively trained to achieve state-of-the-art performances in SEA languages, based on the Llama and" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 248 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 248 + ], + "type": "text", + "content": "Gemma family of LLMs. SEA-LION represents the next advancement in the development of LLMs that explicitly supports SEA languages. Both models are fully open-source and available for commercial use to increase accessibility and innovation in multilingual LLMs in Southeast Asia. We will make our resources publicly available — including the dataset, training scripts, training checkpoints, and all fine-tuned models, even those that achieve state-of-the-art performance on the benchmarks — to establish solid baselines, ensure reproducibility, and support future research focused on culturally and professionally relevant SEA applications." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 259, + 161, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 259, + 161, + 273 + ], + "spans": [ + { + "bbox": [ + 68, + 259, + 161, + 273 + ], + "type": "text", + "content": "Acknowledgment" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 280, + 291, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 280, + 291, + 376 + ], + "spans": [ + { + "bbox": [ + 67, + 280, + 291, + 376 + ], + "type": "text", + "content": "This research is supported by the National Research Foundation, Singapore, under its National Large Language Models Funding Initiative. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not reflect the views of the National Research Foundation, Singapore." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 387, + 126, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 387, + 126, + 399 + ], + "spans": [ + { + "bbox": [ + 68, + 387, + 126, + 399 + ], + "type": "text", + "content": "Limitation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 409, + 291, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 409, + 291, + 584 + ], + "spans": [ + { + "bbox": [ + 67, + 409, + 291, + 584 + ], + "type": "text", + "content": "Although we propose the state-of-the-art SEA LLMs, we found that the benchmark might not cover all the properties and languages we want to evaluate. For example, SEA-HELM is a robustness benchmark, but only covers four languages. SEACrowd is a benchmark that covers all SEA languages, but it is only classical NLP datasets (no chat or instruction following datasets). We require a more holistic SEA benchmark that covers LLM-specific tasks written in all SEA languages. However, with the current evaluation design choice, these benchmarks are the best design choice for current SEA research works." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 586, + 291, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 291, + 706 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 291, + 706 + ], + "type": "text", + "content": "Moreover, we conduct experiments using only 8 and 9 billion parameter models. We argue that this is the most commonly used model size in real-world scenarios. In addition, our method can and should also work with a higher or smaller model size since our proposed technique does not rely on the model size, as we demonstrated by applying the SFT and alignment techniques on both Llama and Gemma models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 731, + 127, + 743 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 731, + 127, + 743 + ], + "spans": [ + { + "bbox": [ + 68, + 731, + 127, + 743 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 751, + 290, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 751, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 68, + 751, + 290, + 773 + ], + "type": "text", + "content": "SCB 10X, VISTEC, and SEACrowd. 2024. Thai llm leaderboard." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 526, + 775 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 304, + 72, + 506, + 84 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 506, + 84 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 506, + 84 + ], + "type": "text", + "content": "AI Singapore AI Products Team. 2024. Sea-helm." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 92, + 470, + 104 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 92, + 470, + 104 + ], + "spans": [ + { + "bbox": [ + 304, + 92, + 470, + 104 + ], + "type": "text", + "content": "AISG AI Singapore. 2023. Sea-lion-pile." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 112, + 483, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 112, + 483, + 124 + ], + "spans": [ + { + "bbox": [ + 304, + 112, + 483, + 124 + ], + "type": "text", + "content": "AISG AI Singapore. 2025. Sea-lion-pile-v2." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 132, + 526, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 132, + 526, + 211 + ], + "spans": [ + { + "bbox": [ + 304, + 132, + 526, + 211 + ], + "type": "text", + "content": "Anton Alexandrov, Veselin Raychev, Mark Niklas Mueller, Ce Zhang, Martin Vechev, and Kristina Toutanova. 2024. Mitigating catastrophic forgetting in language transfer via model merging. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 17167-17186, Miami, Florida, USA. Association for Computational Linguistics." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 218, + 478, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 218, + 478, + 230 + ], + "spans": [ + { + "bbox": [ + 304, + 218, + 478, + 230 + ], + "type": "text", + "content": "Arcee-AI. 2024. Llama-3.1-supernova-lite." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 238, + 526, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 238, + 526, + 327 + ], + "spans": [ + { + "bbox": [ + 304, + 238, + 526, + 327 + ], + "type": "text", + "content": "Adrien Barbaresi. 2021. Trafilatura: A Web Scraping Library and Command-Line Tool for Text Discovery and Extraction. In Proceedings of the Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: System Demonstrations, pages 122-131. Association for Computational Linguistics." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 335, + 526, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 335, + 526, + 358 + ], + "spans": [ + { + "bbox": [ + 304, + 335, + 526, + 358 + ], + "type": "text", + "content": "BAAI Beijing Academy of Artificial Intelligence. 2024. Infinity instruct." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 366, + 526, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 526, + 531 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 526, + 531 + ], + "type": "text", + "content": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 540, + 459, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 540, + 459, + 551 + ], + "spans": [ + { + "bbox": [ + 304, + 540, + 459, + 551 + ], + "type": "text", + "content": "CommonCrawl. 2024. Commoncrawl." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 560, + 526, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 560, + 526, + 647 + ], + "spans": [ + { + "bbox": [ + 304, + 560, + 526, + 647 + ], + "type": "text", + "content": "Ganqu Cui, Lifan Yuan, Ning Ding, Guanming Yao, Bingxiang He, Wei Zhu, Yuan Ni, Guotong Xie, Ruobing Xie, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2024. ULTRAFEEDBACK: boosting language models with scaled AI feedback. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 656, + 443, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 656, + 443, + 667 + ], + "spans": [ + { + "bbox": [ + 304, + 656, + 443, + 667 + ], + "type": "text", + "content": "DAMO-NLP-SG. 2024. Seaexam." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 677, + 526, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 526, + 721 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 526, + 721 + ], + "type": "text", + "content": "Longxu Dou, Qian Liu, Guangtao Zeng, Jia Guo, Jiahui Zhou, Wei Lu, and Min Lin. 2024. *Sailor: Open language models for south-east asia. CoRR*, abs/2404.03608." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 729, + 526, + 775 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 729, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 304, + 729, + 526, + 775 + ], + "type": "text", + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony Hartshorn, Aobo Yang," + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 79, + 72, + 291, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 72, + 291, + 390 + ], + "spans": [ + { + "bbox": [ + 79, + 72, + 291, + 390 + ], + "type": "text", + "content": "Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, Aurélien Rodriguez, Austen Gregerson, Ava Spataru, Baptiste Rozière, Bethany Biron, Binh Tang, Bobbie Chern, Charlotte Caucheteux, Chaya Nayak, Chloe Bi, Chris Marra, Chris McConnell, Christian Keller, Christophe Touret, Chunyang Wu, Corinne Wong, Cristian Canton Ferrer, Cyrus Nikolaidis, Damien Alonsius, Daniel Song, Danielle Pintz, Danny Livshits, David Esiobu, Dhruv Choudhary, Dhruv Mahajan, Diego Garcia-Olano, Diego Perino, Dieuwke Hupkes, Egor Lakomkin, Ehab AlBadawy, Elina Lobanova, Emily Dinan, Eric Michael Smith, Filip Radenovic, Frank Zhang, Gabriel Synnaeve, Gabrielle Lee, Georgia Lewis Anderson, Graeme Nail, Grégoire Mialon, Guan Pang, Guillem Cucurell, Hailey Nguyen, Hannah Korevaar, Hu Xu, Hugo Touvron, Iliyan Zarov, Imanol Arrieta Ibarra, Isabel M. Kloumann, Ishan Misra, Ivan Evtimov, Jade Copet, Jaewon Lee, Jan Geffert, Jana Vranes, Jason Park, Jay Mahadeokar, Jeet Shah, Jelmer van der Linde, Jennifer Billock, Jenny Hong, Jenya Lee, Jeremy Fu, Jianfeng Chi, Jianyu Huang, Jiawen Liu, Jie Wang, Jiecao Yu, Joanna Bitton, Joe Spisak, Jongsoo Park, Joseph Rocca, Joshua Johnstun, Joshua Saxe, Junteng Jia, Kalyan Vasuden Alwala, Kartikeya Upasani, Kate Plawiak, Ke Li, Kenneth Heafield, Kevin Stone, and et al. 2024. The llama 3 herd of models. CoRR abs/2407.21783." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 400, + 289, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 400, + 289, + 423 + ], + "spans": [ + { + "bbox": [ + 69, + 400, + 289, + 423 + ], + "type": "text", + "content": "Wikipedia Foundation. 2024. Wikipedia enterprise. html dumps downloads." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 433, + 291, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 433, + 291, + 533 + ], + "spans": [ + { + "bbox": [ + 69, + 433, + 291, + 533 + ], + "type": "text", + "content": "Charles Goddard, Shamane Siriwardhana, Malikeh Ehghaghi, Luke Meyers, Vladimir Karpukhin, Brian Benedict, Mark McQuade, and Jacob Solawetz. 2024. Arcee's mergekit: A toolkit for merging large language models. 
In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: EMNLP 2024 - Industry Track, Miami, Florida, USA, November 12-16, 2024, pages 477-485. Association for Computational Linguistics." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 543, + 290, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 543, + 290, + 619 + ], + "spans": [ + { + "bbox": [ + 69, + 543, + 290, + 619 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021. Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 630, + 290, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 630, + 290, + 686 + ], + "spans": [ + { + "bbox": [ + 69, + 630, + 290, + 686 + ], + "type": "text", + "content": "Pin-Lun Hsu, Yun Dai, Vignesh Kothapalli, Qingquan Song, Shao Tang, Siyu Zhu, Steven Shimizu, Shivam Sahni, Haowen Ning, and Yanning Chen. 2024. Liger kernel: Efficient triton kernels for lIm training. arXiv preprint arXiv:2410.10989." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 696, + 291, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 696, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 696, + 291, + 774 + ], + "type": "text", + "content": "Shengding Hu, Yuge Tu, Xu Han, Chaoqun He, Ganqu Cui, Xiang Long, Zhi Zheng, Yewei Fang, Yuxiang Huang, Weilin Zhao, Xinrong Zhang, Zhen Leng Thai, Kai Zhang, Chongyi Wang, Yuan Yao, Chenyang Zhao, Jie Zhou, Jie Cai, Zhongwu Zhai, Ning Ding, Chao Jia, Guoyang Zeng, Dahai Li, Zhiyuan Liu, and Maosong Sun. 2024. Minicpm: Un" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 315, + 72, + 524, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 524, + 95 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 524, + 95 + ], + "type": "text", + "content": "veiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 101, + 480, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 101, + 480, + 114 + ], + "spans": [ + { + "bbox": [ + 304, + 101, + 480, + 114 + ], + "type": "text", + "content": "HuggingFace. 2024. Open llm leaderboard." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 121, + 526, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 121, + 526, + 187 + ], + "spans": [ + { + "bbox": [ + 304, + 121, + 526, + 187 + ], + "type": "text", + "content": "Gabriel Ilharco, Marco Túlio Ribeiro, Mitchell Wortsman, Ludwig Schmidt, Hannaneh Hajishirzi, and Ali Farhadi. 2023. Editing models with task arithmetic. In *The Eleventh International Conference on Learning Representations*, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 194, + 526, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 194, + 526, + 271 + ], + "spans": [ + { + "bbox": [ + 304, + 194, + 526, + 271 + ], + "type": "text", + "content": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2017. Bag of tricks for efficient text classification. 
In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 427-431. Association for Computational Linguistics." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 279, + 526, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 279, + 526, + 344 + ], + "spans": [ + { + "bbox": [ + 304, + 279, + 526, + 344 + ], + "type": "text", + "content": "Wei Qi Leong, Jian Gang Ngui, Yosephine Susanto, Hamsawardhini Rengarajan, Kengatharayer Sarveswaran, and William-Chandra Tjhi. 2023. BHASA: A holistic southeast asian linguistic and cultural evaluation suite for large language models. CoRR, abs/2309.06085." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 353, + 526, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 353, + 526, + 407 + ], + "spans": [ + { + "bbox": [ + 304, + 353, + 526, + 407 + ], + "type": "text", + "content": "Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 415, + 526, + 745 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 526, + 745 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 526, + 745 + ], + "type": "text", + "content": "Holy Lovenia, Rahmad Mahendra, Salsabil Maulana Akbar, Lester James V. Miranda, Jennifer Santoso, Elyanah Aco, Akhdan Fadhilah, Jonibek Mansurov, Joseph Marvin Imperial, Onno Kampman, Joel Ruben Antony Moniz, Muhammad Ravi Shulthan Habibi, Frederikus Hudi, Jann Montalan, Ryan Hadiwijaya, Joanito Agili Lopo, William Nixon, Borje Karlsson, James Jaya, Ryandito Diandaru, Yuze Gao, Patrick Amadeus Irawan, Bin Wang, Jan Christian Blaise Cruz, Chenxi Whitehouse, Ivan Halim Parmonangan, Maria Khelli, Wenyu Zhang, Lucky Susanto, Reynard Adha Ryanda, Sonny Lazuardi Hermawan, Dan John Velasco, Muhammad Dehan Al Koutsar, Willy Fitra Hendria, Yasmin Moslem, Noah Flynn, Muhammad Farid Adilazuarda, Haochen Li, Johannes Lee, R. Damanhuri, Shuo Sun, Muhammad Reza Qorib, Amirbek Djanibekov, Wei Qi Leong, Quyet V. Do, Niklas Muennighoff, Tanrada Pansuwan, Ilham Firdausi Putra, Yan Xu, Ngee Tai Chia, Ayu Purwarianti, Sebastian Ruder, William-Chandra Tjhi, Peerat Limkonchotiwat, Alham Fikri Aji, Sedrick Keh, Genta Indra Winata, Ruochen Zhang, Fajri Koto, Zheng Xin Yong, and Samuel Cahyawijaya. 2024. Seacrowd: A multilingual multimodal data hub and benchmark suite for southeast asian languages. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami FL, USA, November 12-16, 2024, pages 5155-5203. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 751, + 526, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 751, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 751, + 526, + 774 + ], + "type": "text", + "content": "Anton Lozhkov, Raymond Li, Loubna Ben Allal, Federico Cassano, Joel Lamy-Poirier, Nouamane Tazi," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 79, + 72, + 291, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 72, + 291, + 259 + ], + "spans": [ + { + "bbox": [ + 79, + 72, + 291, + 259 + ], + "type": "text", + "content": "Ao Tang, Dmytro Pykhtar, Jiawei Liu, Yuxiang Wei, Tianyang Liu, Max Tian, Denis Kocetkov, Arthur Zucker, Younes Belkada, Zijian Wang, Qian Liu, Dmitry Abulkhanov, Indraneil Paul, Zhuang Li, Wen-Ding Li, Megan Risdal, Jia Li, Jian Zhu, Terry Yue Zhuo, Evgenii Zheltonozhskii, Nii Osae Osae Dade, Wenhao Yu, Lucas Krauß, Naman Jain, Yixuan Su, Xuanli He, Manan Dey, Edoardo Abati, Yekun Chai, Niklas Muennighoff, Xiangru Tang, Muhtasham Oblokulov, Christopher Akiki, Marc Marone, Cheng-hao Mou, Mayank Mishra, Alex Gu, Binyuan Hui, Tri Dao, Armel Zebaze, Olivier Dehaene, Nicolas Patry, Canwen Xu, Julian J. McAuley, Han Hu, Torsten Scholak, Sébastien Paquet, Jennifer Robinson, Carolyn Jane Anderson, Nicolas Chapados, and et al. 2024. Starcoder 2 and the stack v2: The next generation. CoRR, abs/2402.19173." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 266, + 291, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 266, + 291, + 301 + ], + "spans": [ + { + "bbox": [ + 69, + 266, + 291, + 301 + ], + "type": "text", + "content": "Yu Meng, Mengzhou Xia, and Danqi Chen. 2024. Simpo: Simple preference optimization with a reference-free reward. CoRR, abs/2405.14734." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 307, + 291, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 307, + 291, + 418 + ], + "spans": [ + { + "bbox": [ + 69, + 307, + 291, + 418 + ], + "type": "text", + "content": "Xuan-Phi Nguyen, Wenxuan Zhang, Xin Li, Mahani Aljunied, Zhiqiang Hu, Chenhui Shen, Yew Ken Chia, Xingxuan Li, Jianyu Wang, Qingyu Tan, Liying Cheng, Guanzheng Chen, Yue Deng, Sen Yang, Chaoqun Liu, Hang Zhang, and Lidong Bing. 2024. SeaLLMs - large language models for Southeast Asia. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pages 294-304, Bangkok, Thailand. Association for Computational Linguistics." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 426, + 290, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 426, + 290, + 448 + ], + "spans": [ + { + "bbox": [ + 69, + 426, + 290, + 448 + ], + "type": "text", + "content": "OpenAI. 2023. GPT-4 technical report. CoRR, abs/2303.08774." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 455, + 290, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 455, + 290, + 511 + ], + "spans": [ + { + "bbox": [ + 69, + 455, + 290, + 511 + ], + "type": "text", + "content": "Guilherme Penedo, Hynek Kydlicek, Loubna Ben Allal, Anton Lozhkov, Margaret Mitchell, Colin Raffel, Leandro von Werra, and Thomas Wolf. 2024. The fine web datasets: Decanting the web for the finest text data at scale. CoRR, abs/2406.17557." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 518, + 291, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 518, + 291, + 596 + ], + "spans": [ + { + "bbox": [ + 69, + 518, + 291, + 596 + ], + "type": "text", + "content": "Ivan Provilkov, Dmitrii Emelianenko, and Elena Voita. 2020. Bpe-dropout: Simple and effective subword regularization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, pages 1882-1892. Association for Computational Linguistics." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 603, + 291, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 603, + 291, + 682 + ], + "spans": [ + { + "bbox": [ + 69, + 603, + 291, + 682 + ], + "type": "text", + "content": "Jeff Rasley, Samyam Rajbhandari, Olatunjri Ruwase, and Yuxiong He. 2020. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In KDD '20: The 26th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, Virtual Event, CA, USA, August 23-27, 2020, pages 3505-3506. ACM." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 688, + 291, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 688, + 291, + 743 + ], + "spans": [ + { + "bbox": [ + 69, + 688, + 291, + 743 + ], + "type": "text", + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2023. GPQA: A graduate-level google-proof q&a benchmark. CoRR, abs/2311.12022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 751, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 751, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 69, + 751, + 290, + 775 + ], + "type": "text", + "content": "Morgane Rivière, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 526, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 526, + 423 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 526, + 423 + ], + "type": "text", + "content": "Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, Johan Ferret, Peter Liu, Pouya Tafti, Abe Friesen, Michelle Casbon, Sabela Ramos, Ravin Kumar, Charline Le Lan, Sammy Jerome, Anton Tsitsulin, Nino Vieillard, Piotr Stanczyk, Sertan Girgin, Nikola Momchev, Matt Hoffman, Shantanu Thakoor, Jean-Bastien Grill, Behnam Neyshabur, Olivier Bachem, Alanna Walton, Aliaksei Severyn, Alicia Parrish, Aliya Ahmad, Allen Hutchison, Alvin Abdagic, Amanda Carl, Amy Shen, Andy Brock, Andy Coenen, Anthony Laforge, Antonia Paterson, Ben Bastian, Bilal Piot, Bo Wu, Brandon Royal, Charlie Chen, Chintu Kumar, Chris Perry, Chris Welty, Christopher A. 
Choquette-Choo, Danila Sinopalnikov, David Weinberger, Dimple Vijaykumar, Dominika Rogozinska, Dustin Herbison, Elisa Bandy, Emma Wang, Eric Noland, Erica Moreira, Evan Senter, Evgenii Eltsyshev, Francesco Visin, Gabriel Rasskin, Gary Wei, Glenn Cameron, Gus Martins, Hadi Hashemi, Hanna Klimczak-Plucinska, Harleen Batra, Harsh Dhand, Ivan Nardini, Jacinda Mein, Jack Zhou, James Svensson, Jeff Stanway, Jetha Chan, Jin Peng Zhou, Joana Carrasqueira, Joana Iljazi, Jocelyn Becker, Joe Fernandez, Joost van Amersfoort, Josh Gordon, Josh Lipschultz, Josh Newlan, Ju-yeong Ji, Kareem Mohamed, Kartikeya Badola, Kat Black, Katie Millican, Keelin McDonell, Kelvin Nguyen, Kiranbir Sodhia, Kish Greene, Lars Lowe Sjösund, Lauren Usui, Laurent Sifre, Lena Heuermann, Leticia Lago, and Lilly McNealus. 2024. Gemma 2: Improving open language models at a practical size. CoRR, abs/2408.00118." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 431, + 525, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 431, + 525, + 454 + ], + "spans": [ + { + "bbox": [ + 303, + 431, + 525, + 454 + ], + "type": "text", + "content": "Sailor2 Team. 2024. Sailor2: Sailing in south-east asia with inclusive multilingual llm." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 460, + 525, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 525, + 670 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 525, + 670 + ], + "type": "text", + "content": "Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, Matthias Galle, Jonathan Tow, Alexander M. Rush, Stella Biderman, Albert Webson, Pawan Sasanka Ammanamanchi, Thomas Wang, Benoit Sagot, Niklas Muennighoff, Albert Villanova del Moral, Olatunj Ruwase, Rachel Bawden, Stas Bekman, Angelina McMillan-Major, Iz Beltagy, Huu Nguyen, Lucile Saulnier, Samson Tan, Pedro Ortiz Suarez, Victor Sanh, Hugo Laurençon, Yacine Jernite, Julien Launay, Margaret Mitchell, Colin Raffel, Aaron Gokaslan, Adi Simhi, Aitor Soroa, Alham Fikri Aji, Amit Alfassy, Anna Rogers, Ariel Kreisberg Nitzav, Canwen Xu, Chenghao Mou, Chris Emezue, Christopher Klamm, Colin Leong, Daniel van Strien, David Ifeoluwa Adelani, and et al. 2022. BLOOM: A 176b-parameter open-access multilingual language model. CoRR, abs/2211.05100." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 303, + 677, + 525, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 677, + 525, + 700 + ], + "spans": [ + { + "bbox": [ + 303, + 677, + 525, + 700 + ], + "type": "text", + "content": "AI Singapore SEA-LION Team. 2024. Llama3 8b cpt sea-lionv2.1 instruct." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 708, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 708, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 303, + 708, + 525, + 775 + ], + "type": "text", + "content": "Luca Soldaini, Rodney Kinney, Akshita Bhagia, Dustin Schwenk, David Atkinson, Russell Authur, Ben Bogin, Khyathi Chandu, Jennifer Dumas, Yanai Elazar, Valentin Hofmann, Ananya Jha, Sachin Kumar, Li Lucy, Xinxi Lyu, Nathan Lambert, Ian Magnusson, Jacob Morrison, Niklas Muennighoff," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 80, + 72, + 291, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 72, + 291, + 194 + ], + "spans": [ + { + "bbox": [ + 80, + 72, + 291, + 194 + ], + "type": "text", + "content": "Aakanksha Naik, Crystal Nam, Matthew Peters, Abhilasha Ravichander, Kyle Richardson, Zejiang Shen, Emma Strubell, Nishant Subramani, Oyvind Tafjord, Evan Walsh, Luke Zettlemoyer, Noah Smith, Hannaneh Hajishirzi, Iz Beltagy, Dirk Groeneveld, Jesse Dodge, and Kyle Lo. 2024. Dolma: an open corpus of three trillion tokens for language model pretraining research. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15725-15788, Bangkok, Thailand. Association for Computational Linguistics." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 201, + 290, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 201, + 290, + 267 + ], + "spans": [ + { + "bbox": [ + 69, + 201, + 290, + 267 + ], + "type": "text", + "content": "Zayne Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. 2024. Musr: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 275, + 290, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 275, + 290, + 352 + ], + "spans": [ + { + "bbox": [ + 69, + 275, + 290, + 352 + ], + "type": "text", + "content": "Yosephine Susanto, Adithya Venkatadri Hulagadri, Jann Railey Montalan, Jian Gang Ngui, Xian Bin Yong, Weiqi Leong, Hamsawardhini Rengarajan, Peerat Limkonchotiwat, Yifan Mai, and William Chandra Tjhi. 2025. Sea-helm: Southeast asian holistic evaluation of language models. Preprint, arXiv:2502.14301." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 360, + 290, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 360, + 290, + 460 + ], + "spans": [ + { + "bbox": [ + 69, + 360, + 290, + 460 + ], + "type": "text", + "content": "Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V. Le, Ed H. Chi, Denny Zhou, and Jason Wei. 2023. Challenging big-bench tasks and whether chain-of-thought can solve them. In *Findings of the Association for Computational Linguistics: ACL* 2023, Toronto, Canada, July 9-14, 2023, pages 13003-13051. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 468, + 290, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 468, + 290, + 500 + ], + "spans": [ + { + "bbox": [ + 69, + 468, + 290, + 500 + ], + "type": "text", + "content": "Sho Takase, Ryokan Ri, Shun Kiyono, and Takuya Kato. 2024. Large vocabulary size improves large language models. CoRR, abs/2406.16508." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 509, + 289, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 509, + 289, + 532 + ], + "spans": [ + { + "bbox": [ + 69, + 509, + 289, + 532 + ], + "type": "text", + "content": "Sailor Team. 2024. Sailor2: Sailing in south-east asia with inclusive multilingual llms." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 539, + 290, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 539, + 290, + 562 + ], + "spans": [ + { + "bbox": [ + 69, + 539, + 290, + 562 + ], + "type": "text", + "content": "The Mosaic ML Team. 2021. composer. https://github.com/mosaicml/composer/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 570, + 290, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 570, + 290, + 592 + ], + "spans": [ + { + "bbox": [ + 69, + 570, + 290, + 592 + ], + "type": "text", + "content": "The Mosaic ML Team. 2022. Llm foundry. https://github.com/mosaicml/llm-foundry." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 600, + 290, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 600, + 290, + 655 + ], + "spans": [ + { + "bbox": [ + 69, + 600, + 290, + 655 + ], + "type": "text", + "content": "Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. 2024. Openmathinstruct-2: Accelerating AI for math with massive open-source instruction data. CoRR, abs/2410.01560." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 663, + 290, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 663, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 663, + 290, + 774 + ], + "type": "text", + "content": "Ahmet Üstün, Viraat Aryabumi, Zheng Xin Yong, WeiYin Ko, Daniel D'souza, Gbemileke Onilude, Neel Bhandari, Shivalika Singh, Hui-Lee Ooi, Amr Kayid, Freddie Vargus, Phil Blunsom, Shayne Longpre, Niklas Muennighoff, Marzieh Fadaee, Julia Kreutzer, and Sara Hooker. 2024. Aya model: An instruction finetuned open-access multilingual language model. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 774 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 315, + 72, + 524, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 524, + 95 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 524, + 95 + ], + "type": "text", + "content": "11-16, 2024, pages 15894-15939. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 103, + 525, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 103, + 525, + 214 + ], + "spans": [ + { + "bbox": [ + 304, + 103, + 525, + 214 + ], + "type": "text", + "content": "Bin Wang, Zhengyuan Liu, Xin Huang, Fangkai Jiao, Yang Ding, AiTi Aw, and Nancy Chen. 2024a. Sealeval for multilingual foundation models: From crosslingual alignment to cultural reasoning. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pages 370-390. Association for Computational Linguistics." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 222, + 525, + 299 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 222, + 525, + 299 + ], + "spans": [ + { + "bbox": [ + 304, + 222, + 525, + 299 + ], + "type": "text", + "content": "Ke Wang, Nikolaos Dimitriadis, Guillermo Ortiz-Jiménez, François Fleuret, and Pascal Frossard. 2024b. Localizing task information for improved model merging and compression. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 308, + 525, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 308, + 525, + 386 + ], + "spans": [ + { + "bbox": [ + 304, + 308, + 525, + 386 + ], + "type": "text", + "content": "Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. 2024c. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. CoRR, abs/2406.01574." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 394, + 525, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 394, + 525, + 483 + ], + "spans": [ + { + "bbox": [ + 304, + 394, + 525, + 483 + ], + "type": "text", + "content": "Chris Wendler, Veniamin Veselovsky, Giovanni Monea, and Robert West. 2024. Do llamas work in english? on the latent language of multilingual transformers. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pages 15366-15394. Association for Computational Linguistics." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 491, + 525, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 491, + 525, + 601 + ], + "spans": [ + { + "bbox": [ + 304, + 491, + 525, + 601 + ], + "type": "text", + "content": "Mitchell Wortsman, Peter J. Liu, Lechao Xiao, Katie E. Everett, Alexander A. Alemi, Ben Adlam, John D. Co-Reyes, Izzeddin Gur, Abhishek Kumar, Roman Novak, Jeffrey Pennington, Jascha Sohl-Dickstein, Kelvin Xu, Jaehoon Lee, Justin Gilmer, and Simon Kornblith. 2024. Small-scale proxies for large-scale transformer training instabilities. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. Open-Review.net." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 610, + 525, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 610, + 525, + 666 + ], + "spans": [ + { + "bbox": [ + 304, + 610, + 525, + 666 + ], + "type": "text", + "content": "Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuchen Lin. 2024. Magpie: Alignment data synthesis from scratch by prompting aligned llms with nothing. CoRR, abs/2406.08464." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 675, + 525, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 675, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 675, + 525, + 774 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jianxin Yang, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 80, + 72, + 291, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 72, + 291, + 160 + ], + "spans": [ + { + "bbox": [ + 80, + 72, + 291, + 160 + ], + "type": "text", + "content": "Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Xuejing Liu, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, Zhifang Guo, and Zhihao Fan. 2024a. Qwen2 technical report. CoRR, abs/2407.10671." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 169, + 290, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 169, + 290, + 213 + ], + "spans": [ + { + "bbox": [ + 69, + 169, + 290, + 213 + ], + "type": "text", + "content": "Ziyi Yang, Fanqi Wan, Longguang Zhong, Tianyuan Shi, and Xiaojun Quan. 2024b. Weighted-reward preference optimization for implicit model fusion. CoRR, abs/2412.03187." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 222, + 290, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 222, + 290, + 300 + ], + "spans": [ + { + "bbox": [ + 69, + 222, + 290, + 300 + ], + "type": "text", + "content": "Wei Jie Yeo, Teddy Ferdinan, Przemyslaw Kazienko, Ranjan Satapathy, and Erik Cambria. 2024. Self-training large language models through knowledge detection. In *Findings of the Association for Computational Linguistics: EMNLP* 2024, Miami, Florida, USA, November 12-16, 2024, pages 15033-15045. Association for Computational Linguistics." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 309, + 290, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 309, + 290, + 375 + ], + "spans": [ + { + "bbox": [ + 69, + 309, + 290, + 375 + ], + "type": "text", + "content": "Le Yu, Bowen Yu, Haiyang Yu, Fei Huang, and Yongbin Li. 2024. 
Language models are super mario: Absorbing abilities from homologous models as a free lunch. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 385, + 290, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 385, + 290, + 450 + ], + "spans": [ + { + "bbox": [ + 69, + 385, + 290, + 450 + ], + "type": "text", + "content": "Wenxuan Zhang, Hou Pong Chan, Yiran Zhao, Mahani Aljunied, Jianyu Wang, Chaoqun Liu, Yue Deng, Zhiqiang Hu, Weiwen Xu, Yew Ken Chia, Xin Li, and Lidong Bing. 2024a. Seallms 3: Open foundation and chat multilingual large language models for southeast asian languages. CoRR, abs/2407.19672." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 460, + 290, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 460, + 290, + 526 + ], + "spans": [ + { + "bbox": [ + 69, + 460, + 290, + 526 + ], + "type": "text", + "content": "Xulang Zhang, Rui Mao, and Erik Cambria. 2024b. Multilingual emotion recognition: Discovering the variations of lexical semantics between languages. In International Joint Conference on Neural Networks, IJCNN 2024, Yokohama, Japan, June 30 - July 5, 2024, pages 1-9. IEEE." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 535, + 290, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 535, + 290, + 623 + ], + "spans": [ + { + "bbox": [ + 69, + 535, + 290, + 623 + ], + "type": "text", + "content": "Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, Alban Desmaison, Can Balioglu, Pritam Damania, Bernard Nguyen, Geeta Chauhan, Yuchen Hao, Ajit Mathews, and Shen Li. 2023. Pytorch FSDP: experiences on scaling fully sharded data parallel. Proc. VLDB Endow., 16(12):3848-3860." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 632, + 290, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 632, + 290, + 719 + ], + "spans": [ + { + "bbox": [ + 69, + 632, + 290, + 719 + ], + "type": "text", + "content": "Wenzhen Zheng, Wenbo Pan, Xu Xu, Libo Qin, Li Yue, and Ming Zhou. 2024a. Breaking language barriers: Cross-lingual continual pre-training at scale. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pages 7725-7738. Association for Computational Linguistics." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 729, + 290, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 729, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 729, + 290, + 774 + ], + "type": "text", + "content": "Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024b. Llamafactory: Unified efficient fine-tuning of " + }, + { + "bbox": [ + 69, + 729, + 290, + 774 + ], + "type": "inline_equation", + "content": "100+" + }, + { + "bbox": [ + 69, + 729, + 290, + 774 + ], + "type": "text", + "content": " language models. 
In Proceedings of the" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 233 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 314, + 72, + 525, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 525, + 116 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 525, + 116 + ], + "type": "text", + "content": "62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand. Association for Computational Linguistics." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 125, + 525, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 125, + 525, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 125, + 525, + 179 + ], + "type": "text", + "content": "Chengzhi Zhong, Fei Cheng, Qianying Liu, Junfeng Jiang, Zhen Wan, Chenhui Chu, Yugo Murawaki, and Sadao Kurohashi. 2024. Beyond english-centric llms: What language do multilingual language models think in? CoRR, abs/2408.10811." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 188, + 525, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 188, + 525, + 233 + ], + "spans": [ + { + "bbox": [ + 304, + 188, + 525, + 233 + ], + "type": "text", + "content": "Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. 2023. Instruction-following evaluation for large language models. CoRR, abs/2311.07911." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 142, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 142, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 142, + 84 + ], + "type": "text", + "content": "A Appendix" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 92, + 262, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 92, + 262, + 105 + ], + "spans": [ + { + "bbox": [ + 67, + 92, + 262, + 105 + ], + "type": "text", + "content": "A.1 Continued pre-training (CPT) data" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 110, + 456, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 110, + 456, + 122 + ], + "spans": [ + { + "bbox": [ + 67, + 110, + 456, + 122 + ], + "type": "text", + "content": "Existing data: We utilize existing datasets as shown in Table 6 (HuggingFace Datasets)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 123, + 524, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 123, + 524, + 150 + ], + "spans": [ + { + "bbox": [ + 67, + 123, + 524, + 150 + ], + "type": "text", + "content": "Other data: As shown in Table 6 (the other data section), the listed datasets contain data from a diverse range of domains, including news, books, articles, poems, etc." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 176, + 159, + 418, + 277 + ], + "blocks": [ + { + "bbox": [ + 176, + 159, + 418, + 277 + ], + "lines": [ + { + "bbox": [ + 176, + 159, + 418, + 277 + ], + "spans": [ + { + "bbox": [ + 176, + 159, + 418, + 277 + ], + "type": "table", + "html": "
Continued Pre-training Data
Source (HuggingFace Datasets)LanguagesSize (Billions of Tokens)
bigcode/the-stack-v2-dedupCODE40
allenai/dolmaEN37.5
HuggingFaceFW/fineweb-eduEN7.5
aisingapore/SEA-PILE-v1SEA47.58
aisingapore/SEA-PILE-v2ID7
Source (Others)LanguagesSize (Billions of Tokens)
VinBigDataVI16
WangChanBERTaTH8.5
Others - ENEN5
Others - SEASEA30.92
", + "image_path": "d1fb51f7d5203701b1c1be87c57a86727b27c69725ee0cf2689b1ea2cdbdd506.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 174, + 285, + 417, + 298 + ], + "lines": [ + { + "bbox": [ + 174, + 285, + 417, + 298 + ], + "spans": [ + { + "bbox": [ + 174, + 285, + 417, + 298 + ], + "type": "text", + "content": "Table 6: List of datasets for the continued pre-training stage." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 68, + 320, + 176, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 320, + 176, + 333 + ], + "spans": [ + { + "bbox": [ + 68, + 320, + 176, + 333 + ], + "type": "text", + "content": "A.2 Stage 1 IFT data" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 199, + 356, + 394, + 388 + ], + "blocks": [ + { + "bbox": [ + 261, + 348, + 332, + 356 + ], + "lines": [ + { + "bbox": [ + 261, + 348, + 332, + 356 + ], + "spans": [ + { + "bbox": [ + 261, + 348, + 332, + 356 + ], + "type": "text", + "content": "Stage 1 IFT Datasets" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 199, + 356, + 394, + 388 + ], + "lines": [ + { + "bbox": [ + 199, + 356, + 394, + 388 + ], + "spans": [ + { + "bbox": [ + 199, + 356, + 394, + 388 + ], + "type": "table", + "html": "
Source (HuggingFace Datasets)LanguagesSize
BAAI/Infinity-InstructEN7,449,106
nvidia/OpenMathInstruct-2EN2,000,000
", + "image_path": "55f574138c852425fa5e8074e6ad64c6e4c0ac613ddfd8d9c24174ce9df33553.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 396, + 525, + 419 + ], + "lines": [ + { + "bbox": [ + 67, + 396, + 525, + 419 + ], + "spans": [ + { + "bbox": [ + 67, + 396, + 525, + 419 + ], + "type": "text", + "content": "Table 7: List of datasets for Stage-1-IFT. For BAAI/Infinity-Instruct dataset, any conversation that originally ended with a user turn has had that last turn removed." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 68, + 443, + 176, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 443, + 176, + 456 + ], + "spans": [ + { + "bbox": [ + 68, + 443, + 176, + 456 + ], + "type": "text", + "content": "A.3 Stage 2 IFT data" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 460, + 456, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 460, + 456, + 473 + ], + "spans": [ + { + "bbox": [ + 67, + 460, + 456, + 473 + ], + "type": "text", + "content": "Existing data: We utilize existing datasets as shown in Table 9 (HuggingFace Datasets)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 474, + 524, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 474, + 524, + 498 + ], + "spans": [ + { + "bbox": [ + 67, + 474, + 524, + 498 + ], + "type": "text", + "content": "Synthetic data: As shown in Table 9 (the generated part), we describe how to formulate synthetic data as follows" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 80, + 501, + 524, + 635 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 80, + 501, + 524, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 501, + 524, + 527 + ], + "spans": [ + { + "bbox": [ + 80, + 501, + 524, + 527 + ], + "type": "text", + "content": "- qwen_gemma_synthetic datasets are generated first in English with Qwen 32B, utilizing an approach similar to Magpie. Instructions are then translated into the target language with Gemma 2 27B." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 80, + 528, + 524, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 528, + 524, + 566 + ], + "spans": [ + { + "bbox": [ + 80, + 528, + 524, + 566 + ], + "type": "text", + "content": "- Llama_gemma_synthetic datasets are generated first in English with Llama 3.1 70B, utilizing an approach similar to Magpie (Xu et al., 2024). Instructions are then translated into the target language with Gemma 2 27B." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 80, + 569, + 524, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 569, + 524, + 581 + ], + "spans": [ + { + "bbox": [ + 80, + 569, + 524, + 581 + ], + "type": "text", + "content": "- gemma_synthetic datasets are generated directly with Gemma 2 27B using Magpie (Xu et al., 2024)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 80, + 582, + 524, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 582, + 524, + 608 + ], + "spans": [ + { + "bbox": [ + 80, + 582, + 524, + 608 + ], + "type": "text", + "content": "- sea_multilingual_systemchat is a synthetic dataset translated with Gemma 2 27B from the English systemchat dataset." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 80, + 610, + 505, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 610, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 80, + 610, + 505, + 622 + ], + "type": "text", + "content": "- rewritten_oasst is a dataset rewritten with Gemma 2 27B based on the English OASST dataset." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 80, + 623, + 524, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 623, + 524, + 635 + ], + "spans": [ + { + "bbox": [ + 80, + 623, + 524, + 635 + ], + "type": "text", + "content": "- rewritten_helpsteer is a dataset rewritten with Gemma 2 27B based on the English Helpsteer dataset." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 645, + 298, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 645, + 298, + 658 + ], + "spans": [ + { + "bbox": [ + 67, + 645, + 298, + 658 + ], + "type": "text", + "content": "A.4 Helpfulness and preference alignment data" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 663, + 526, + 770 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 663, + 526, + 770 + ], + "spans": [ + { + "bbox": [ + 67, + 663, + 526, + 770 + ], + "type": "text", + "content": "As shown in Table 8, we use the princeton-nlp/gemma2-ultrafeedback-armorm as the source of the alignment data. We then further re-scored with the reward model, nvidia/Llama-3.1-Nemotron-70B-Reward to create the SEA version. In particular, generated-gemma2-27b-seapref-nemotron-70b takes prompts from seald, wangchan_thainstruct, and additional hand-written Southeast Asian cultural prompts collected from native speakers and then generates responses (with a varying temperature) from them with Gemma 2 27B. The responses are then scored with nvidia/Llama-3.1-Nemotron-70B-Reward, with the top-scoring response selected as chosen and vice versa, similar to princeton-nlp/gemma2-ultrafeedback-armorm." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 185, + 120, + 411, + 161 + ], + "blocks": [ + { + "bbox": [ + 271, + 111, + 324, + 119 + ], + "lines": [ + { + "bbox": [ + 271, + 111, + 324, + 119 + ], + "spans": [ + { + "bbox": [ + 271, + 111, + 324, + 119 + ], + "type": "text", + "content": "Preference Data" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 185, + 120, + 411, + 161 + ], + "lines": [ + { + "bbox": [ + 185, + 120, + 411, + 161 + ], + "spans": [ + { + "bbox": [ + 185, + 120, + 411, + 161 + ], + "type": "table", + "html": "
Source (HuggingFace Datasets)LanguagesSize
princeton-nlp/gemma2-ultrafeedback-armormEN61,510
Source (Generated)LanguagesSize
generated-gemma2-27b-seapref-nemotron-70bSEA5,511
", + "image_path": "dd8f45e05294968ff296d594ea86737670808790006a8a27a017ff220a05095b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 120, + 280, + 474, + 709 + ], + "blocks": [ + { + "bbox": [ + 166, + 169, + 426, + 181 + ], + "lines": [ + { + "bbox": [ + 166, + 169, + 426, + 181 + ], + "spans": [ + { + "bbox": [ + 166, + 169, + 426, + 181 + ], + "type": "text", + "content": "Table 8: List of preference datasets used for the alignment stage." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 261, + 271, + 332, + 280 + ], + "lines": [ + { + "bbox": [ + 261, + 271, + 332, + 280 + ], + "spans": [ + { + "bbox": [ + 261, + 271, + 332, + 280 + ], + "type": "text", + "content": "Stage 2 IFT Datasets" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 120, + 280, + 474, + 709 + ], + "lines": [ + { + "bbox": [ + 120, + 280, + 474, + 709 + ], + "spans": [ + { + "bbox": [ + 120, + 280, + 474, + 709 + ], + "type": "table", + "html": "
Source (HuggingFace Datasets)LanguagesSize
BAAI/Infinity-Instruct^*EN1,456,927
HuggingFaceTB/smoltalkEN409,537
allenai/tulu-3-sft-personas-mathEN149,960
parinzee/seed-free-synthetic-instruct-thai-v1TH118,898
HuggingFaceTB/smoltalkEN96,356
HuggingFaceTB/smoltalkEN83,144
arcee-ai/EvolKit-75KEN74,174
AI-MO/NuminaMath-TIREN72,441
Post-training-Data-Flywheel/AutoIF-instruct-61kEN61,492
argilla/ifeval-like-dataEN56,339
HuggingFaceTB/smoltalkEN53,342
ai2-adapt-dev/tulu_v3.9_wildjailbreak_decontaminated_50kEN50,000
ai2-adapt-dev/tulu_v3.9_synthetic_finalresp_wildguardmixtrain_decontaminated_50kEN50,000
allenai/tulu-3-sft-personas-math-gradeEN49,980
allenai/tulu-3-sft-personas-codeEN34,999
HuggingFaceTB/smoltalkEN34,424
allenai/tulu-3-sft-personas-instruction-followingEN29,980
airesearch/WangchanThaiInstructTH25,014
allenai/tulu-3-sft-personas-algebraEN20,000
arcee-ai/EvolKit-20k-viVI15,378
allenai/coconotEN10,983
ai2-adapt-dev/tulu_v3.9_scirff_10kEN10,000
Source (Generated)LanguagesSize
qwen_gemma_synthetic_tamilTA480,000
qwen_gemma_synthetic_thaiTH480,000
qwen_gemma_synthetic_indonesianID465,019
qwen_gemma_synthetic_vietnameseVI465,019
gemma_synthetic_indonesianID458,149
gemma_synthetic_filipinoTL455,093
gemma_synthetic_vietVI291,576
gemma_synthetic_tamilTA276,314
gemma_synthetic_thaiTH186,339
gemma_synthetic_javaneseJV110,000
gemma_synthetic_sudaneseSU110,000
llama_gemma_synthetic_thaiTH88,920
llama_gemma_synthetic_tamilTA88,920
llama_gemma_synthetic_vietnameseVI88,920
llama_gemma_synthetic_javaneseJV88,920
llama_gemma_synthetic_indonesianID88,920
llama_gemma_synthetic_filipinoTL80,000
enrich_27kSEA27,463
seaMultilingual_systemchatSEA1,903
rewritten_oasstSEA841
rewritten_helpsteerSEA838
", + "image_path": "db3a7dde14ce0b341033213c154d6a9aa45d69bffbec2d1963b8cbfc70cff39a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 214, + 718, + 379, + 729 + ], + "lines": [ + { + "bbox": [ + 214, + 718, + 379, + 729 + ], + "spans": [ + { + "bbox": [ + 214, + 718, + 379, + 729 + ], + "type": "text", + "content": "Table 9: List of datasets for Stage-2-IFT." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_content_list.json b/data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e4697cea354bb075c0bacf064a6f6086255e40b9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_content_list.json @@ -0,0 +1,22400 @@ +[ + { + "type": "text", + "text": "An Empirical Study of GPT-4o Image Generation Capabilities", + "text_level": 1, + "bbox": [ + 127, + 122, + 869, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sixiang Chen $^{1*}$ , Jinbin Bai $^{2*}$ , Zhuoran Zhao $^{1*}$ , Tian Ye $^{1*}$ , Qingyu Shi $^{3}$ , Donghao Zhou $^{4}$ , Wenhao Chai $^{5}$ , Xin Lin $^{6}$ , Jianzong Wu $^{3}$ , Chao Tang $^{3}$ , Shilin Xu $^{3}$ , Tao Zhang $^{6}$ , Haobo Yuan $^{6}$ , Yikang Zhou $^{6}$ , Wei Chow $^{2}$ , Linfeng Li $^{2}$ , Xiangtai Li $^{3\\dagger}$ , Lei Zhu $^{1,7\\dagger}$ , Lu Qi $^{6\\dagger}$", + "bbox": [ + 137, + 198, + 870, + 246 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ The Hong Kong University of Science and Technology (GZ) $^{2}$ National University of Singapore $^{3}$ Peking University $^{4}$ The Chinese University of Hong Kong $^{5}$ University of Washington $^{6}$ Wuhan University $^{7}$ The Hong Kong University of Science and Technology", + "bbox": [ + 153, + 246, + 854, + 290 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 325, + 537, + 340 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The landscape of image generation has rapidly evolved, from early GAN-based approaches to diffusion models and, most recently, to unified generative architectures that seek to bridge understanding and generation tasks. Recent advances, especially the GPT-4o, have demonstrated the feasibility of high-fidelity multimodal generation, their architectural design remains mysterious and unpublished. This prompts the question of whether image and text generation have already been successfully integrated into a unified framework for those methods. In this work, we conduct an empirical study of GPT-4o's image generation capabilities, benchmarking it against leading open-source and commercial models. Our evaluation covers four main categories, including text-to-image, image-to-image, image-to-3D, and image-to-X generation, with more than 20 tasks. Our analysis highlights the strengths and limitations of GPT-4o under various settings, and situates it within the broader evolution of generative modeling. Through this investigation, we identify promising directions for future unified generative models, emphasizing the role of architectural design and data scaling. 
For a high-definition version of the PDF, please refer to the link on GitHub: https://github.com/Ephemeral182/Empirical-Study-of-GPT-4o-Image-Gen.", + "bbox": [ + 184, + 357, + 815, + 551 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 127, + 578, + 267, + 594 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Over the past decade, image generation has undergone a remarkable evolution—from the early successes of GANs [35] to the dominance of diffusion models [89, 82, 26], which have significantly advanced image fidelity and diversity [37, 7]. In parallel, Large Language Models (LLMs) have achieved exceptional performance across diverse natural language tasks by scaling autoregressive next-token prediction, demonstrating the power of unified modeling principles. These advances naturally raise a compelling question: can such principles be extended to image generation?", + "bbox": [ + 124, + 609, + 870, + 696 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, fundamental differences between autoregressive and diffusion-based paradigms present non-trivial challenges. Autoregressive models excel in sequential text generation, while diffusion models have become the de facto standard for high-quality image synthesis. Bridging these modalities within a unified framework remains an open challenge. Several works [96, 101, 100, 34, 24, 13] attempt to bridge this gap via multimodal connectors or instruction tuning, with LLMs serving as planning modules that produce intermediate representations for image generation. While effective to some extent, these paradigms often exhibit limited interaction between text and image modalities, and struggle with content consistency—particularly in image-to-image generation and complex instruction-based synthesis.", + "bbox": [ + 124, + 699, + 869, + 811 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address these limitations, recent research explores unified generation models that integrate understanding and generation within a single architecture, following three main technical paradigms. The first line of work represents both language and vision as discrete token sequences [67, 98, 110, 104, 19, 65, 109], leveraging VQGAN [28] or similar compressors to tokenize images for compatibility with autoregressive models. A second direction integrates", + "bbox": [ + 125, + 816, + 870, + 875 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.05979v2 [cs.CV] 10 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contributions. ☑: schen691@connect.hkust-gz.edu.cn † Corresponding authors.", + "bbox": [ + 148, + 883, + 705, + 898 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint. Work in progress.", + "bbox": [ + 127, + 922, + 290, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "large language models directly into the diffusion process [128, 126, 112, 72], employing them as denoising backbones for image generation and as unified sequence models for text. While promising, these approaches typically rely on intermediate compression modules such as VAEs or VQVAEs, which may limit visual fidelity or increase architectural complexity. A third and increasingly prominent paradigm investigates discrete diffusion frameworks that natively support both image and text generation within a unified modeling space [71, 73, 93]. 
Building on this insight, recent works [58, 97] propose fully end-to-end diffusion architectures based on shared Transformer backbones, demonstrating competitive performance and seamless modality integration comparable to similarly sized LLMs.", + "bbox": [ + 125, + 90, + 870, + 203 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite these promising directions, such systems still lag behind the sophistication and generalization capabilities of proprietary models like Flux [51] and Midjourney [75], which may lack reasoning capabilities.", + "bbox": [ + 125, + 208, + 870, + 238 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The recent release of GPT-4o [78] marks a significant milestone in multimodal generative modeling. As a native multimodal architecture, GPT-4o demonstrates strong capabilities in generating high-fidelity, photorealistic images while seamlessly unifying vision and language generation—reportedly in an autoregressive fashion. However, its closed-source nature—particularly the lack of disclosure about its architecture, training regimen, and inference mechanisms—poses substantial challenges for scientific scrutiny. This motivates a careful empirical assessment of its capabilities relative to open-source state-of-the-art models.", + "bbox": [ + 125, + 242, + 870, + 325 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although the visual performance of GPT-4o and Gemini is widely recognized, much of their success likely stems from unprecedented scale in training data, model parameters, and compute resources. Prior studies, including diffusion models and connected-based models, suggest that scaling is a key enabler of generative quality—potentially more so than architectural novelty alone. These trends point to a promising trajectory for unified generative models: with sufficient scale, they may rival or even surpass today's best proprietary systems.", + "bbox": [ + 125, + 332, + 870, + 402 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this study, we conduct a comprehensive evaluation of GPT-4o's image generation performance, benchmarking its outputs against leading systems including Gemini 2.0 Flash Experimental [99] and other state-of-the-art models. Building upon our comparative evaluation across text-to-image, image-to-image, image-to-3D, and image-to-X generation tasks, GPT-4o demonstrates several distinctive strengths:", + "bbox": [ + 125, + 407, + 870, + 465 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Exceptional Text Rendering Capability. GPT-4o demonstrates exceptional capability in rendering textual elements within images, maintaining correct spelling, alignment, and formatting even in document-style generation tasks. This level of text fluency is rarely seen in prior models and is crucial for practical applications such as chart generation, document layout synthesis, and instruction-rich visual storytelling.", + "- Compositional Generalization and Prompt Following. GPT-4o displays impressive compositional abilities, accurately assembling complex scene elements, styles, or attributes described in prompts. This high prompt following enables it to handle fine-grained multi-attribute conditions in generation tasks with minimal loss of semantic detail.", + "- Spatial Reasoning and Multi-View Consistency. 
In generation tasks involving spatial manipulation, such as 3D view synthesis, camera control, and depth-conditioned rendering, GPT-4o maintains geometric consistency and viewpoint realism. This indicates an inherent capacity for spatial reasoning and structural awareness, even without explicit 3D modeling modules.", + "- Comprehensive Image Transformation Capability. GPT-4o shows strong generalization across a wide spectrum of image-to-image tasks, ranging from low-level image restoration to high-level perceptual understanding. Without task-specific tuning, it almost handles diverse transformations such as denoising, deblurring, relighting, segmentation, and depth estimation. This suggests the model has learned robust visual priors and spatial semantics, enabling it to perform correction and abstract structural prediction under a unified framework." + ], + "bbox": [ + 171, + 476, + 869, + 744 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, limitations remain in inconsistent generation, hallucination, and data bias in underrepresented cultural elements and non-Latin scripts, highlighting current trade-offs in model design and training data coverage.", + "bbox": [ + 125, + 756, + 867, + 786 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While we do not analyze the internal architecture or implementation details of GPT-4o in this paper*, we believe it plays an important role toward unified multimodal generation. We also emphasize that model architecture is only one part of this progress—training data, model scale, and optimization strategies are equally important. We hope future work will provide more empirical evidence to better understand such proprietary systems and their position within this evolving research landscape.", + "bbox": [ + 125, + 790, + 870, + 861 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "*There is currently no definitive evidence regarding the specific implementation details or architectural design of GPT-4o's image generation capabilities. To ensure the credibility and accuracy of our analysis, we will refrain from making speculative claims in current version.", + "bbox": [ + 125, + 872, + 870, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Evaluation", + "text_level": 1, + "bbox": [ + 127, + 89, + 253, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As GPT-4o's image generation capability has only recently been released and no API is available, we conduct only qualitative comparisons between GPT-4o, Gemini 2.0 Flash [99], and other state-of-the-art models in their respective domains.", + "bbox": [ + 125, + 119, + 870, + 162 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To systematically compare these models' performance across diverse image generation tasks including text-to-image generation, image-to-image generation, text/image to 3D generation, and various image-to-X generation, we conduct a detailed case study focused on analyzing the performance of these models. This qualitative analysis provides insight into gpt 4o's strengths and limitations in various tasks, as shown in Table 1.", + "bbox": [ + 125, + 169, + 870, + 226 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Low Visual Quality : The image synthesis model fails to generate fine-grained object details or produces blurry outputs. 
Typical cases include distorted human bodies or unrealistic hand shapes.", + "bbox": [ + 125, + 233, + 869, + 265 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Inconsistent Generation : The image synthesis model produces inconsistent output or image details with input image.", + "bbox": [ + 125, + 272, + 867, + 301 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Lack of Knowledge : The image synthesis model lacks domain-specific knowledge, such as particular artistic styles, and thus generates visually plausible but incorrect results.", + "bbox": [ + 125, + 311, + 867, + 342 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Failure to Follow Instructions : The image synthesis model misinterprets the input prompt and produces inconsistent results. For example, it may fail to capture specified numbers, colors, or object arrangements.", + "bbox": [ + 125, + 349, + 867, + 381 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/69fc7b667b5296c731fc241b01cedfd07a9c8e3fb9b10cdf2db89fc2a34aef2f.jpg", + "table_caption": [ + "Table 1: GPT-4o vs. Baselines: Qualitative error analysis across image generation tasks." + ], + "table_footnote": [], + "table_body": "
Case FigureMeta-taskSub-taskGPT-4oGemini-2.0-flashDomain-SOTA
Figure 1SuccessFailure to Follow InstructionsFailure to Follow Instructions
Figure 2Complex Text FollowingSuccessFailure to Follow InstructionsFailure to Follow Instructions
Figure 3SuccessSuccessSuccess
Figure 4SuccessSuccessSuccess
Figure 5SuccessSuccessSuccess
Figure 6Text-to-ImageText RenderingSuccessLow Visual QualityLow Visual Quality
Figure 7SuccessLow Visual QualityLow Visual Quality
Figure 8SuccessLow Visual QualityLow Visual Quality
Figure 9Document GenerationSuccessLow Visual QualityLow Visual Quality
Figure 10SuccessLow Visual QualityLow Visual Quality
Figure 11PanoramaLack of KnowledgeSuccessSuccess
Figure 12Style TransferSuccessLack of KnowledgeLack of Knowledge
Figure 13SuccessLack of KnowledgeLack of Knowledge
Figure 14Low Visual QualitySuccessFailure to Follow Instructions
Figure 15Image EditingFailure to Follow InstructionsFailure to Follow InstructionsFailure to Follow Instructions
Figure 16SuccessFailure to Follow InstructionsFailure to Follow Instructions
Figure 17SuccessFailure to Follow InstructionsFailure to Follow Instructions
Figure 18SuccessFailure to Follow InstructionsFailure to Follow Instructions
Figure 19SuccessInconsistent GenerationFailure to Follow Instructions
Figure 20Single-Concept CustomizationSuccessFailure to Follow InstructionsSuccess
Figure 21Multi-Concept CustomizationInconsistent GenerationInconsistent GenerationSuccess
Figure 22Story Image GenerationSuccessFailure to Follow InstructionsSuccess
Figure 23SuccessInconsistent GenerationSuccess
Figure 24Low-Level Vision-DenoisingLow Visual QualityLow Visual QualitySuccess
Figure 25Low-Level Vision-DerainingSuccessInconsistent GenerationSuccess
Figure 26Low-Level Vision-DehazingSuccessLow Visual QualitySuccess
Figure 27Low-Level Vision-Low Light EnhancementLow Visual QualityLow Visual QualitySuccess
Figure 28Low-Level Vision-DeblurringSuccessLow Visual QualitySuccess
Figure 29Low-Level Vision-Super ResolutionSuccessLow Visual QualitySuccess
Figure 30Low-Level Vision-ImpaintingInconsistent GenerationInconsistent GenerationSuccess
Figure 31Low-Level Vision-OutpaintingInconsistent GenerationSuccessSuccess
Figure 32Low-Level Vision-ColorizationSuccessSuccessSuccess
Figure 33Low-Level Vision-Shadow RemovalSuccessFailure to Follow InstructionsSuccess
Figure 34Low-Level Vision-Reflection RemovalInconsistent GenerationFailure to Follow InstructionsSuccess
Figure 35Low-Level Vision-RelightingSuccessFailure to Follow InstructionsSuccess
Figure 36Spatial Control-CannyInconsistent GenerationFailure to Follow InstructionsSuccess
Figure 37Spatial Control-DepthSuccessFailure to Follow InstructionsSuccess
Figure 38Spatial Control-SketchInconsistent GenerationInconsistent GenerationSuccess
Figure 39Spatial Control-PoseSuccessInconsistent GenerationSuccess
Figure 40Spatial Control-MaskInconsistent GenerationFailure to Follow InstructionsInconsistent Generation
Figure 41Camera ControlInconsistent GenerationFailure to Follow InstructionsSuccess
Figure 42Failure to Follow InstructionsFailure to Follow InstructionsSuccess
Figure 43In-Context Visual PromptingFailure to Follow InstructionsFailure to Follow InstructionsN/A
Figure 44Image to 3D ModelingSuccessFailure to Follow InstructionsFailure to Follow Instructions
Figure 45UV Map to 3D RenderingSuccessInconsistent GenerationFailure to Follow Instructions
Figure 46Novel View SynthesisSuccessSuccessFailure to Follow Instructions
Figure 47Image SegmentationFailure to Follow InstructionsFailure to Follow InstructionsSuccess
Figure 48SuccessFailure to Follow InstructionsSuccess
Figure 49SuccessFailure to Follow InstructionsSuccess
Figure 50Edge DetectionSuccessSuccessSuccess
Figure 51SuccessFailure to Follow InstructionsSuccess
Figure 52SuccessFailure to Follow InstructionsSuccess
Figure 53Salient ObjectSuccessFailure to Follow InstructionsSuccess
Figure 54SuccessSuccessSuccess
Figure 55SuccessSuccessSuccess
Figure 56Depth EstimationSuccessFailure to Follow InstructionsSuccess
Figure 57Normal EstimationSuccessFailure to Follow InstructionsSuccess
Figure 58Layout DetectionInconsistent GenerationInconsistent GenerationSuccess
Figure 59Text DetectionFailure to Follow InstructionsFailure to Follow InstructionsSuccess
Figure 60Inconsistent GenerationInconsistent GenerationSuccess
Figure 61Inconsistent GenerationInconsistent GenerationSuccess
Figure 62Inconsistent GenerationInconsistent GenerationSuccess
Figure 63Inconsistent GenerationInconsistent GenerationSuccess
", + "bbox": [ + 129, + 138, + 870, + 883 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1 Text-to-Image Tasks", + "text_level": 1, + "bbox": [ + 127, + 90, + 310, + 104 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.1.1 Complex Text Following Capability", + "text_level": 1, + "bbox": [ + 127, + 116, + 428, + 132 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Recent progress in text-to-image generation has shown impressive abilities in generating diverse and realistic images based on text prompts. However, composing multiple objects with various attributes and relationships accurately into one scene remains a significant challenge for current text-to-image generative models [92, 85, 8, 81, 6]. In this section, we assess models' ability for compositional text-to-image generation from four perspectives following [41], which include attribute binding, numeracy, object relationship, and complex compositions. Attribute binding evaluates whether the model correctly assigns attributes, such as color, shape, and texture to the appropriate objects. Numeracy evaluates whether the number of generated objects matches the quantities specified in the prompt. Object relationships refer to both spatial (2D/3D) and non-spatial interactions among objects. Complex compositions evaluate the model's ability to handle multiple types of constraints simultaneously, especially given long or detailed prompts.", + "bbox": [ + 124, + 140, + 870, + 281 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Figure 1 row 1, GPT-4o outperforms both Gemini 2.0 Flash and Midjourney in numeracy tasks. While GPT-4o accurately represents a single plate, Gemini 2.0 and Midjourney represent two plates instead. In terms of understanding object relationships, GPT-4o is the only model that correctly infers the action \"walk towards\" from the ragdoll to the labrador. However, GPT-4o struggles with more complex terms like \"pentagonal pyramid\", failing to interpret it correctly (see Figure 1 row 4). This suggests that GPT-4o may have difficulty accurately interpreting objects with unusual geometries. When it comes to abstract prompts, GPT-4o also appears to lack imagination (see Figure 2 row 2), whereas Midjourney v6.1 demonstrates better creativity in this case, outperforming both GPT-4o and Gemini 2.0 Flash.", + "bbox": [ + 124, + 284, + 870, + 397 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For complex text-to-image generation, we evaluate GPT-4o's performance with Gemini 2.0 Flash [99] and FLUX.1-Pro [51], using the text prompts collected from [124, 106, 115]. As shown in Figure 3, both GPT-4o and FLUX excel at generating realistic and harmonious scenes align with the text prompts. However, we observe that GPT-4o shows limitations in generating culturally related elements. For example, the generated crown for the Chinese general is western-style rather than chinese-style (see Figure 4 row 2). Additionally, in large scene generation, GPT-4o struggles to maintain boundary continuity, whereas FLUX produces a more natural composition (see Figure 4 row 3).", + "bbox": [ + 124, + 401, + 870, + 501 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Overall, we conclude that GPT-4o excels at text-to-image generation in terms of attribute binding, generative numeracy, object relationship, and complex compositions. 
However, it exhibits limitations in generating uncommon objects, culturally specific elements and in maintaining continuity when composing large scenes.", + "bbox": [ + 124, + 505, + 870, + 550 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Text-to-Image Generation", + "text_level": 1, + "bbox": [ + 191, + 99, + 392, + 116 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5e416a9762e2def779608eb1bf6eee5a9cee49745a628b45ccc17073ed461705.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 251, + 126, + 269, + 143 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation: Visual content precisely following the text instruction.", + "text_level": 1, + "bbox": [ + 271, + 128, + 772, + 145 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b56cffa92dcc8965f109102c1a0975dedfc665539ea8cf6c71d5a2879dfe8dcc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 171, + 146, + 362, + 292 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e6d59fa9484b3877034764c9fa8752e51c78a7b4eba2a3dd224cdb9236f23368.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 393, + 146, + 586, + 295 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/114bf170640575674f20a5f89dd3e1c35830102990e7d45e9fe0e42bf870b7d0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 146, + 823, + 295 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e21293e1dde8f2f8a8736c0a4d673afa08874092081ea4b1ec07788bbe177b05.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 171, + 313, + 366, + 460 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/cde20841ace4621a197135b25fda935e422854fc542a786ca15f9aea2eb46486.jpg", + "image_caption": [ + "Input Text: \"A yellow bowl, a blue mug and a pink plate on the table.\"", + "Input Text: \"A ragdoll walks towards a labrador.\"" + ], + "image_footnote": [], + "bbox": [ + 393, + 311, + 589, + 464 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d0a18282d6404ef0ab645148e73d17fe445982eb8c40cb3c0d5a9081efd6d52c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 310, + 821, + 464 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3bffa6f0cb5545336d1dece3985a3fcaff2ad60da5c231db47b90005c63f4ceb.jpg", + "image_caption": [ + "Input Text: \"Three differently colored apples (yellow, green, red from left to right) with a Coca-Cola bottle placed behind the middle apple.\"" + ], + "image_footnote": [], + "bbox": [ + 171, + 479, + 370, + 627 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f333d581ad570f753e99a7e73c99d827c89d746ae6f1f549c53cf30cf7e7ea33.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 479, + 589, + 627 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/107a161e8c64a21e6ae887d967ee6aa6bf1c940aafacc290e429d8deb9516754.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 478, + 821, + 627 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/00b435bd1d8026aa4f0d08be8a9579479f625012cae88f49f81a5d958cf5c199.jpg", + "image_caption": [ + "Input Text: \"The oval sphere was nestled between the rectangular prism and the pentagonal pyramid.\"", + "Figure 1: Task: Compositional text-to-image generation. 
Evaluate the image-text alignment on attribute binding, numeracy, and object relationship. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o outperforms Gemini 2.0 Flash and Midjourney v6.1 across all aspects. However, GPT-4o struggles with uncommon objects with a special geometry." + ], + "image_footnote": [], + "bbox": [ + 171, + 657, + 369, + 806 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b1e74b98b6b45c314db43ad44efec736de0526ea53933c41d6f494dfc222ec5c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 393, + 657, + 589, + 806 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ec488303c09dbf9dfb728fd1952368c1c3fa85f708e3b290ecd1a36b50ef4803.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 657, + 821, + 806 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 235, + 829, + 287, + 842 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 436, + 830, + 550, + 844 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Midjourney v6.1", + "bbox": [ + 665, + 829, + 774, + 844 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/41cf37136782a8224523753684cab11c0ddb36c12dfd39ee89ac851285853af2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 253, + 127, + 269, + 143 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation: Visual content precisely following the text instruction.", + "text_level": 1, + "bbox": [ + 272, + 130, + 772, + 145 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/742bdab0de5d04587ee79a5c9d93f90cfb4494e5a48dcf258a1406c6265bd627.jpg", + "image_caption": [ + "Input Text: \"The round, juicy watermelon sat in the cool, refreshing bowl of ice, waiting to be sliced open and devoured.\"" + ], + "image_footnote": [], + "bbox": [ + 176, + 146, + 372, + 292 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c73e3aa340f33cab4cf19354249e37fe1bd7f76aef6ec7fbcc154198f1dad05b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 393, + 146, + 611, + 292 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/382030f3e3f0298bca0a14e5cda55ee5859de8f05e70a6182873cc5b53987e77.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 633, + 146, + 823, + 292 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9a87f98dd86139de0d38246d54b1d070591e2fdb526d3285409a473583f28ec6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 319, + 372, + 469 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fce6b55e09781aa60141751625be4a55c7d0fd1eef584be53a6585c781eade33.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 397, + 319, + 609, + 468 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0f7ad2208e211f067756484bb7a6847038cf13549a2f34c8a13b12eeda49aad7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 632, + 319, + 826, + 469 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d87f5d38548a498be4acc2e6175a859f019a900cdcbd8394bd2700b4f145ed8f.jpg", + "image_caption": [ + "Input Text: \"The bold, expressive strokes of the artist's brush brought the blank canvas to life, forming a vibrant and 
dynamic masterpiece.\"" + ], + "image_footnote": [], + "bbox": [ + 176, + 496, + 372, + 643 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c87b7b6792c5fda9d5340959a376273a7f5d0b80db6770aea6ec9e15c6ec596d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 397, + 496, + 609, + 643 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4d7cf29e3244126ed31062342f32c73674817be1d0079f2da561d4e75f3368e2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 632, + 496, + 828, + 645 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c36a7bcd11820fd2f7d3c63fbf0e563dbdebdbcf2828507130fec8b86c2c1cc1.jpg", + "image_caption": [ + "Input Text: \"The heavy raindrops fell on the smooth glass and the textured roof.\"" + ], + "image_footnote": [], + "bbox": [ + 176, + 662, + 372, + 811 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0e3a0dc448dd3f289f1837b24241f0ae490d1d79799115f52e41475efc038437.jpg", + "image_caption": [ + "Input Text: \"The gentle, soothing melody of the piano filled the concert hall, as the pianist's fingers danced over the keys.\"", + "Figure 2: Task: Compositional text-to-image generation. Evaluate the image-text alignment on attribute binding and complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o outperforms the other two models in generating objects aligned with the text prompts accurately. But for more abstract and creative tasks, Midjourney v6.1 performs the best." + ], + "image_footnote": [], + "bbox": [ + 397, + 662, + 611, + 813 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/adcb856bb8a351cb62cc8d6fd9dcfe0f715a542653534f39e99b1973a7067ec8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 632, + 662, + 828, + 813 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 233, + 842, + 285, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 434, + 842, + 547, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Midjourney v6.1", + "bbox": [ + 663, + 840, + 771, + 854 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Text-to-Image Generation", + "bbox": [ + 194, + 102, + 392, + 118 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Text-to-Image Generation (with complex text prompt)", + "text_level": 1, + "bbox": [ + 174, + 107, + 393, + 137 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Evaluation: Visual content precisely following the text instruction.", + "text_level": 1, + "bbox": [ + 223, + 148, + 787, + 165 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/27fce9400300fb184ed43e1e4b4f8eb042ae16f63646b6eda324b41fff54aede.jpg", + "image_caption": [ + "Input Text: \"An icy landscape. A vast expanse of snow-covered mountain peaks stretches endlessly. Beneath them is a dense forest and a colossal frozen lake. Three people are boating in three boats separately in the lake. Not far from the lake, a volcano threatens eruption, its rumblings felt even from afar. 
Above, a ferocious red dragon dominates the sky and commands the heavens, fueled by the volcano's relentless energy flow.\" (Prompt from GenArtist)" + ], + "image_footnote": [], + "bbox": [ + 173, + 167, + 359, + 311 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/116d28e49cd90395347ff38e9ec8e1c04b768757b89b7a9a1d0a0f0f317b20f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 390, + 167, + 578, + 311 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bfaf1c18b294719962fa4fcaa9024d109045581572e81ebad1ebe756236b696a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 167, + 797, + 311 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/64e6d603fb4bb84b5ebce439ab2fdb3c6e0e566e8af8982c1c50d5378b6fe487.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 390, + 359, + 532 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7ed09698c0927be9038e1132fc7243c7e628f5e5c041880670c7d1ac190273ad.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 390, + 388, + 578, + 532 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/484e31e49c1e7a2ebd3c367ae0db8fcd10716584cec8c8e06beaca0f6c7a0381.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 388, + 795, + 532 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2de6dd439640e1a491cd2669646ca4deab67272994343185f41956a82e6a40a1.jpg", + "image_caption": [ + "Input Text: \"In a magical seascape, a majestic ship sails through crystal blue waters surrounded by vibrant marine life and soaring birds. Towering cliffs frame the scene, while a stunning rainbow arches across the sky, blending with ethereal clouds. This enchanting journey captures the serene beauty of nature's wonders.\" (Prompt from IterComp)" + ], + "image_footnote": [], + "bbox": [ + 173, + 599, + 359, + 742 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0c0470a214490980a8b144c3e88a00976a14f32edb1762c91f3846a0c05ec5b2.jpg", + "image_caption": [ + "Input Text: \"On the rooftop of a skyscraper in a bustling cyberpunk city, a figure in a trench coat and neon-lit visor stands amidst a garden of bio-luminescent plants, overlooking the maze of flying cars and towering holograms. Robotic birds flit among the foliage, digital billboards flash advertisements in the distance.\" (Prompt from IterComp)" + ], + "image_footnote": [], + "bbox": [ + 390, + 598, + 578, + 743 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/745ba50ac275ead363177aa6a6389523a01b4c236062fe89cb966d79aeafe69b.jpg", + "image_caption": [ + "Figure 3: Task: Compositional text-to-image generation. Evaluate the image-text alignment on complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Pro [51]. Observation: GPT-4o and FLUX can generate more harmonious and natural scene than Gemini 2.0 Flash." 
+ ], + "image_footnote": [], + "bbox": [ + 609, + 598, + 795, + 742 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 233, + 806, + 290, + 820 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 418, + 806, + 544, + 820 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "FLUX", + "bbox": [ + 676, + 806, + 723, + 820 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Text-to-Image Generation (with complex text prompt)", + "text_level": 1, + "bbox": [ + 168, + 119, + 387, + 150 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/30ed799a330abd36ec3b456792e50674b62123dda7d183a4909f198292186c06.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 197, + 157, + 217, + 175 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Evaluation: Visual content precisely following the text instruction.", + "text_level": 1, + "bbox": [ + 222, + 162, + 787, + 178 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0a19bafce54aa9b28620506b9c6051449ff283c024b7b8bdde3dea6959a5f2ab.jpg", + "image_caption": [ + "Input Text: \"Under the luminous full moon, a serene Japanese garden with traditional pagodas and a tranquil pond creates a magical night scene. The soft glow from the lantern-lit buildings reflects on the water, blending nature and architecture in harmony. The moonlight bathes the landscape, enhancing the peaceful ambiance.\" (Prompt from IterComp)" + ], + "image_footnote": [], + "bbox": [ + 171, + 181, + 357, + 325 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/1660f44ab6725c952d5b0bc43505f77c0d7cb4b552be04d6430ee2b25ec63d61.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 393, + 183, + 578, + 325 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/89e41678c8fcc68e299a5cf2931d67bb1f2339142bca0c32c1593b4888650519.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 611, + 183, + 795, + 325 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f016fb55ce2f2b9f48a4d23f36970310500e1250abf50d5e88f1902507f424c6.jpg", + "image_caption": [ + "Input Text: \"A Chinese general wearing a crown, with whiskers and golden Chinese style armor, standing with a majestic dragon head on his chest, symbolizing his strength, wearing black and gold boots. 
His appearance exudes a sense of authority, wisdom, and an unyielding spirit, embodying the ideal ancient Chinese hero.\" (Prompt from RPG)" + ], + "image_footnote": [], + "bbox": [ + 197, + 393, + 333, + 547 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/9077e20f899c60d5e1c26e1748dfa141980bf09da6c613467d42c1143bab34a1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 413, + 393, + 558, + 547 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/c341891a5dd74ee440ca81c75da11adba9b78fd29a19903df5c98dbac7513e19.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 627, + 393, + 777, + 547 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/4569221e0b0de61258bfb9ba425fbed4cda37201c120d48edf92be4e3bff1d5c.jpg", + "image_caption": [ + "Input Text: \"A beautiful landscape with a river in the middle, the left of the river is in the evening and in the winter with a big iceberg and a small village while some people are skiing on the river and some people are skating, the right of the river is in the summer with a volcano in the morning and a small village while some people are playing.\" (Prompt from RPG)" + ], + "image_footnote": [], + "bbox": [ + 151, + 617, + 357, + 724 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/475d909f8bd94a4e962a1af7f117f1db9f76e1f2cd7047ccbdf158cec0cbc2b2.jpg", + "image_caption": [ + "Figure 4: Task: Compositional text-to-image generation. Evaluate the image-text alignment on complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Pro [51]. Observation: GPT-4o struggles to generate culturally related elements and maintain boundary continuity (see rows 2 and 3), similar to Gemini 2.0 Flash and FLUX." + ], + "image_footnote": [], + "bbox": [ + 361, + 618, + 504, + 724 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/ef1d1bd4564e7e63c547c011879fd964bcadd7553b251ad9931c404cc35e496b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 617, + 651, + 724 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/1086a817de2fda2fa73bc2221b337c167aa823711f8d87db081972a8c108577d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 655, + 617, + 843, + 724 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 220, + 795, + 277, + 809 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 431, + 795, + 557, + 809 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "FLUX", + "bbox": [ + 722, + 792, + 767, + 806 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "2.1.2 Text Rendering", + "text_level": 1, + "bbox": [ + 127, + 90, + 292, + 107 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Text rendering is a task that aims at generating texts (characters, sentences, or even paragraphs) on an image. The text content is usually guided by the input prompt. 
Previous models [27, 2] show good capability in generating short text (within 10 words, such as signs or short phrases), but their ability to generate long texts remains limited.", + "bbox": [ + 125, + 114, + 870, + 157 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "As shown in Figure 5, GPT-4o demonstrates comparable abilities to existing state-of-the-art (SOTA) baselines when generating short texts. All the methods except FLUX [51] perform well at rendering short text following the prompt. In this section, we primarily focus on long text rendering to examine whether GPT-4o can surpass these baselines for extended textual content.", + "bbox": [ + 125, + 162, + 869, + 219 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We choose POSTA [12], Gemini 2.0 Flash [99], Ideogram 3.0 [2], and Playground-v3 [64] as the baselines because of their established capabilities in rendering longer texts. The results are shown in Figure 6 and Figure 7.", + "bbox": [ + 125, + 224, + 867, + 253 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "From these examples, we make the following key observations:", + "bbox": [ + 125, + 260, + 545, + 273 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- GPT-4o's strength in long text generation: Compared with other baselines, GPT-4o demonstrates a superior ability to generate long, coherent text. In example 1 and example 3, GPT-4o produces detailed textual information with fewer than three characters generated incorrectly across more than 100 characters of text.", + "- Baseline limitations: When the input prompt becomes extremely long, models such as Gemini 2.0 Flash, Ideogram 3.0, and Playground-v3 often produce significantly more errors or produce vague text patches that are difficult to recognize.", + "- POSTA's performance: As a model specifically designed for poster-style text generation, POSTA performs closely to, or in some instances slightly more precisely than, GPT-4o. We hypothesize this is due to its multi-step pipeline tailored for long text rendering." 
+ ], + "bbox": [ + 169, + 285, + 867, + 431 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Overall, we conclude that GPT-4o excels at long text rendering, offering overwhelming performance compared to most existing commercial models, and delivering results on par with the latest specialized research models.", + "bbox": [ + 125, + 445, + 869, + 474 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Short Text Rendering", + "text_level": 1, + "bbox": [ + 163, + 114, + 334, + 130 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/9d7e40286ddeb28b93be6916d8101d062aa91f512e1c1ac6b0ccc889ae647631.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 141, + 359, + 157 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Evaluation: Text Rendering Precision.", + "text_level": 1, + "bbox": [ + 362, + 145, + 656, + 160 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/08d9d7d0f4498fa74ab30be14fe461e1fd36819539ce56b0a1505126518de149.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 162, + 295, + 271 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/234feaba9c9e1d0f65d67e6df566f551271d9b6bc16b92cc18990362a5b21b2f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 315, + 164, + 452, + 271 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/0ce5a44bd3cd7500b4b78db75c0f1ccc81baa684ffa77ebdb728c28b0fe9e785.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 470, + 164, + 609, + 271 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/204fddddcbcd5e6532fa8831a9afe9f12c4bf879dee56a592fddb35d74766418.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 627, + 164, + 838, + 271 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Input Text: \"A beautiful painting of flowing colors and styles forming the words 'The GPT-4o/Ideogram/FLUX/SD3 research paper is nowhere!'. 
the background is speckled with drops and splashes of paint.\"", + "bbox": [ + 163, + 279, + 818, + 324 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/5d4b6580226201ab5042c29511d6da5a3409b9f5c7fee1d2d9c074d14f64765b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 330, + 251, + 438 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/6ec2422a775f3f017131805ebc131bf30d8d32953e433ab7a7e35ce013d814bd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 274, + 330, + 413, + 438 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/4f7ea6846a9f2e71ede47f66f8045aa234b77d2155c2f56eca68f95514f68e80.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 437, + 330, + 575, + 438 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/1b62b31f138cbdfe4ceec53b959cee36fc3c5f0c22524ab44623a5cd565ac9c8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 330, + 839, + 438 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Input Text: \"Beautiful pixel art of a Wizard with hovering text 'Achievement unlocked: Diffusion models can spell now'.\"", + "bbox": [ + 166, + 446, + 781, + 477 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/4ba3110052c7723a701e2ccf5e847ad8c21956fcb34a978c1e55cce340d0131c.jpg", + "image_caption": [ + "Figure 5: Task: Short text rendering. Generate prompt-aligned, concise textual content (typically within 10 words) on an image. Setup: Each sample is produced based on a guiding text prompt. Comparisons are made with prior SOTA models [27, 2] and FLUX [51]. Observations: GPT-4o achieves performance on par with existing SOTA baselines in rendering short texts, consistently following the prompt with minimal errors. All evaluated methods—except FLUX [51]—deliver high-fidelity results in this setting." 
+ ], + "image_footnote": [], + "bbox": [ + 158, + 486, + 251, + 592 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/4fed5bdfe6b3d24177ff8f96422315e81b3ae864616ce0f21770566bf7fd8891.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 486, + 501, + 592 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/83fabbd535e803f344bff99d3d53a6c8a71d95998a6a377d7bae9a19ebb0830a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 532, + 486, + 669, + 592 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/63a0fda286dbcf6aab1f70694ab0576b25d5fd247827bdfa027aca3ce2984b08.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 702, + 486, + 839, + 592 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Input Text: \"A monkey holding a sign reading 'Scaling transformer models is awesome'.\"", + "bbox": [ + 166, + 603, + 784, + 618 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/b44926116e89a328041a0572f04bcac45c5be77916af431b03c4a0ab99dd761f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 631, + 295, + 738 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/cb9fbff8e79609bd0c0397e9ff8bdd26c4934e0c043bf90309793e247c53acde.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 305, + 631, + 442, + 738 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/ecba2242bac62c8676019c58e03ec1fc8ba9f66e7431827caf98cf3565ae7429.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 452, + 631, + 589, + 738 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/eb143f4361a33ce174515f230c11a216ac7590d56a2fc328a2adc862245f0ce7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 601, + 632, + 836, + 738 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Input Text: \"A surreal and humorous scene in a classroom with the words 'GPUs go brrrrr' written in white chalk on a blackboard. 
In front of the blackboard.\"", + "bbox": [ + 165, + 744, + 803, + 773 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 200, + 784, + 251, + 797 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Ideogram 3.0", + "bbox": [ + 326, + 784, + 421, + 799 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "FLUX", + "bbox": [ + 500, + 784, + 542, + 797 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "SD3", + "bbox": [ + 700, + 784, + 736, + 797 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Evaluation: Text Rendering Precision.", + "text_level": 1, + "bbox": [ + 374, + 127, + 643, + 141 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/afce16d9cf439bcdc04fa94f8ca4f2227195b2cf36f175b299fa1b98664ddbfa.jpg", + "image_caption": [ + "GPT 40" + ], + "image_footnote": [], + "bbox": [ + 181, + 146, + 320, + 306 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/c6b354d42faa888d692b35bdfbc5cd4599018152cf0064da6cd13e513960b86e.jpg", + "image_caption": [ + "POSTA" + ], + "image_footnote": [], + "bbox": [ + 321, + 146, + 462, + 306 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/f4cd9c8e039a64fce1592ca2bb40e8f9f9bf8f8c530f65984c76ff6708a91c6f.jpg", + "image_caption": [ + "Gemini 2.0 Flash" + ], + "image_footnote": [], + "bbox": [ + 464, + 146, + 604, + 306 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/c5582fe68220a5a384efa294ef35989ebef4760bd5b2897bd0ee23ceffe344a0.jpg", + "image_caption": [ + "Ideogram 3.0" + ], + "image_footnote": [], + "bbox": [ + 604, + 146, + 812, + 306 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Input Text:", + "text_level": 1, + "bbox": [ + 187, + 327, + 269, + 339 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "\"Generate a movie poster with a sci-fi space theme, a solitary figure standing on an alien planet, facing a massive outpost.", + "bbox": [ + 187, + 340, + 808, + 366 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The poster displays the following text:", + "bbox": [ + 187, + 366, + 441, + 378 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Title: The Last Outpost", + "bbox": [ + 187, + 378, + 344, + 391 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Subtitle: When the stars fall, the truth rises", + "bbox": [ + 187, + 391, + 486, + 404 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Information:", + "text_level": 1, + "bbox": [ + 187, + 405, + 276, + 416 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Produced by Jackson Ward", + "bbox": [ + 187, + 417, + 367, + 430 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Music by Aria Calloway", + "bbox": [ + 187, + 431, + 336, + 443 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Screenplay by Elena Sharpe", + "bbox": [ + 187, + 443, + 370, + 455 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Directed By Sylvia Hartman", + "bbox": [ + 187, + 455, + 370, + 469 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "\"A visually stunning and narratively gripping exploration of the unknown. 
The Last Outpost masterfully blends elements of science fiction, mystery, and psychological thriller, creating a hauntingly atmospheric journey that will leave audiences on the edge of their seats.\" -- Global Film Review\".", + "bbox": [ + 187, + 469, + 795, + 518 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/e1dc82e6233835aef84e7c0d62fd27535b3b4aee7f6bc4beab765dacdbbdc4c1.jpg", + "image_caption": [ + "GPT 40" + ], + "image_footnote": [], + "bbox": [ + 181, + 523, + 316, + 681 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/59f2bdbb4c18ff5b2eaf1af8462de2d557b98afb5403da1a76ec1f639e6017a6.jpg", + "image_caption": [ + "POSTA" + ], + "image_footnote": [], + "bbox": [ + 318, + 523, + 460, + 683 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/3c2edc361f25ec06384618fcf44a118efef67ed2c14c3a957eec1c7c432abf66.jpg", + "image_caption": [ + "Gemini 2.0 Flash" + ], + "image_footnote": [], + "bbox": [ + 460, + 523, + 616, + 683 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/1751faef203209061a6fae572da8f65fff10f9f74ef783748441d19f7f4a2be9.jpg", + "image_caption": [ + "Ideogram 3.0", + "Figure 6: Task: Long text rendering. Generate extended, coherent, and prompt-consistent textual content on an image. Setup: Evaluations are conducted against advanced baselines including POSTA [12], Gemini 2.0 Flash [99], Ideogram 3.0 [2], and Playground-v3 [64]. Observations: GPT-4o excels in long text rendering by producing coherent, detailed textual information with very few character errors. In contrast, models like Gemini 2.0 Flash, Ideogram 3.0, and Playground-v3 often exhibit increased errors or generate vague text when faced with lengthy prompts, while POSTA's tailored multi-step pipeline sometimes yields competitive precision. Overall, GPT-4o outperforms most commercial models and rivals specialized research approaches in extended text generation." + ], + "image_footnote": [], + "bbox": [ + 617, + 523, + 823, + 683 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Input Text:", + "text_level": 1, + "bbox": [ + 184, + 705, + 264, + 718 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "\"Create a poster with the theme of a Journey of Solitude. The background should depict a lone figure walking toward an unusable form of transportation. 
The scene should evoke a sense of being lost, helplessness, and desolation, capturing the emotional weight of losing oneself in a barren, unforgiving landscape.", + "bbox": [ + 183, + 719, + 800, + 770 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Title: Solitary Journeys", + "bbox": [ + 184, + 770, + 341, + 784 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Subtitle: Elara Voss", + "bbox": [ + 184, + 784, + 316, + 795 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Information: WANDERING THROUGH THE UNKNOWN\".", + "bbox": [ + 184, + 796, + 563, + 808 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Long Text Rendering", + "bbox": [ + 207, + 99, + 359, + 116 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Long Text Rendering", + "text_level": 1, + "bbox": [ + 200, + 291, + 356, + 308 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/072dabf79d619daa8eeb4cd2f8867859c2a5525ca4ceff1ee839637ee46f829f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 346, + 316, + 367, + 333 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Evaluation: Text Rendering Precision.", + "text_level": 1, + "bbox": [ + 367, + 320, + 647, + 335 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/37c0d9d0ba1eed41834771a2d8df3690d2b37294a08a5844702161e53aa817ab.jpg", + "image_caption": [ + "GPT 40", + "Figure 7: Task: Long text rendering. The Setup and Observations are the same as Figure 6." + ], + "image_footnote": [], + "bbox": [ + 166, + 339, + 307, + 503 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/61b9d5a9c147c446387301c4f13acd7ceeccc06f653954410c1dc398d1fd4bc3.jpg", + "image_caption": [ + "POSTA" + ], + "image_footnote": [], + "bbox": [ + 308, + 339, + 454, + 503 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/96c1a55c7c5f312a71c89d34f708b0e4f8da4556922ffdee41d673b76ea82dad.jpg", + "image_caption": [ + "Gemini 2.0 Flash" + ], + "image_footnote": [], + "bbox": [ + 454, + 339, + 614, + 503 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/faf4eca5a3720dff227ce6a4ad8f4898de848e4e08fec90adf87952d14446f7e.jpg", + "image_caption": [ + "Playground-v3" + ], + "image_footnote": [], + "bbox": [ + 616, + 339, + 828, + 503 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Input Text:", + "text_level": 1, + "bbox": [ + 174, + 537, + 259, + 550 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "\"Please generate an artistic and stylized promotional poster. The style is an artistic painting style. The theme is about nature and city. The poster displays the following information: Title: Fragmented Harmony", + "bbox": [ + 174, + 551, + 792, + 590 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Subtitle: Between the steel and sky, life finds its way.", + "bbox": [ + 174, + 590, + 535, + 604 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Information: Amid the towering structures and the quiet persistence of nature, a delicate balance emerges. The complex and often contradictory relationship between urban development and the natural world reveals itself in fleeting moments of harmony. Though fragmented, life continues, threading its way through the shadows of progress.
Here, conflict and coexistence form an intricate dance--sometimes at odds, sometimes in unexpected unity\".", + "bbox": [ + 173, + 604, + 813, + 671 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "2.1.3 Document Generation", + "text_level": 1, + "bbox": [ + 127, + 90, + 338, + 104 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We also explore a novel task: document image generation with GPT-4o, comparing its performance with Gemini 2.0 Flash [99] and Playground-v3 [64]. As shown in Figure 8 - 10, GPT-4o produces document images with cleaner layouts and more consistent content.", + "bbox": [ + 125, + 114, + 870, + 157 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Document Image Generation", + "text_level": 1, + "bbox": [ + 169, + 176, + 392, + 191 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/7ff25ac80d044220739d61284585a070ee9142eede21fe8dbf47dff3cb2ffa9c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 203, + 356, + 220 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Evaluation: Text Rendering Precision.", + "text_level": 1, + "bbox": [ + 357, + 207, + 650, + 222 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Attention Is All You Need", + "text_level": 1, + "bbox": [ + 187, + 255, + 331, + 266 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Lilion Jones Aidan N.Gomez Lukasz Kaiser IIIa Polosukhin", + "bbox": [ + 184, + 276, + 339, + 297 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 243, + 310, + 277, + 318 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on tw machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature.", + "bbox": [ + 158, + 320, + 367, + 414 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Attention Is All You Need", + "text_level": 1, + "bbox": [ + 403, + 323, + 501, + 330 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Ashish Vourakis, NM Nazarov, NJI Parmar, Jbab Udekoreli, Lillion Jones, Adiinil S. W. Coomce, Lakota Kiber, Pala Poslashov, and M. A. D. G. R. Smith. 2017. The neural network models are based on recurrent or conventional neural networks in an encoder-decoder loss. The best produced models also connect the encoder and decoder loss through an attention mechanism. 
As, we propose a new study mechanism for a machine, the Transformer, based attention mechanisms, dispensing with the same weight as the encoder-decoder loss. The results of the training tasks show these models to be superior in quality while being more parsibilized and requiring significantly less time to time to move train. Our model achieves 84.2 BLUE in the WMT-50 Go-to-translation task, which is comparable to the performance of our previous work [3]. In addition, our WMT-30 Pragmatic task, our model established a new single-model state-of-the-art-state-of-the-art BLUE score of 41.8 when $\\alpha = 5$ training for 3.5 days on GPUs after fraction of the training costs of the best models from literature. We propose Transformer generalizes well by applying it successfully to English syntacticity parsing both with large and limited training data.", + "bbox": [ + 397, + 330, + 566, + 415 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Attention Is All You Need", + "text_level": 1, + "bbox": [ + 602, + 228, + 705, + 236 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Ashish A. [Al] You. Wea a nono' Ainon 1 \nAshish Yawani, Naqadzaree, laokotri \nAlok Uzzotner, Anokalika Sanik, Jokoslav adar, Gosak III Ploosukhaini \nAlok Uzzotner, Anokalika Sanik", + "bbox": [ + 602, + 241, + 692, + 268 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Antext, exotocic sequecra transoedscnncs on cbr be caes baccared on bracococcyne nort bell netiabon an ecocycclion, an ecocyclon. TeTrane: the ensonnnmnsn neeepnckian. Epipcnie rile kceely on meenctiny As adeterdiencr. \nnpopioors sonr tonarwamchim. I mtnr. vorti. inenpoea a dedusum minnyss onomcrh. cortordone.lora ontata tose or uin hoperiosper. \nThe rorner is s oovl dtt maive de acemnccnodkdu aleu cormunb-dlr bing dvl-ndr 016de and mechance \n11 Dae nucnccnng \nceso nucnse \n12 - eannnr er on attonne amehnes asnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnnccnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn", + "bbox": [ + 602, + 280, + 828, + 417 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "GPT40", + "bbox": [ + 233, + 426, + 290, + 439 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 416, + 426, + 545, + 441 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Playground-v3", + "bbox": [ + 663, + 425, + 772, + 441 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Input Text:", + "text_level": 1, + "bbox": [ + 161, + 448, + 259, + 463 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "\"Generate A realistic screenshot of the first page of the Paper from the following information:", + "bbox": [ + 158, + 465, + 795, + 500 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Title: Attention Is All You Need", + "bbox": [ + 158, + 503, + 411, + 518 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Author List: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin", + "bbox": [ + 158, + 522, + 833, + 556 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Abstract: The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. 
The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.\"", + "bbox": [ + 158, + 560, + 831, + 824 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Figure 8: Task: Document image generation. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Playground-v3 [64]. Observation: GPT-4o can generate more consistent and accurate font and format than the other two models.", + "bbox": [ + 125, + 842, + 870, + 883 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Document Image Generation", + "text_level": 1, + "bbox": [ + 169, + 150, + 390, + 167 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/e960d89a9332326f5fac58239c1d5784d9e237d7ccede93ea1c82462b3f589b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 176, + 354, + 194 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Evaluation: Text Rendering Precision.", + "text_level": 1, + "bbox": [ + 356, + 180, + 650, + 196 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", + "text_level": 1, + "bbox": [ + 161, + 224, + 379, + 250 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Jacob Devlin Ming-Wei Chang Kenton Lee Kristina Toutanova", + "bbox": [ + 171, + 260, + 334, + 277 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 251, + 290, + 287, + 299 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result; the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.", + "bbox": [ + 158, + 301, + 383, + 366 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "BERT is conceptually simple and empirically powerful. 
It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to $80.5\\%$ (7.7% point absolute improvement), MultiNLI accuracy to $\\mathcal{S}6.7\\%$ (4.6% absolute improvement), SQAud v1.1 question answering Test F1 to 93.2 (1.5 point absolute", + "bbox": [ + 158, + 366, + 387, + 402 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 241, + 409, + 299, + 422 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", + "text_level": 1, + "bbox": [ + 416, + 224, + 583, + 247 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Ashlor Jacob Doslin Ming Wei Chang, Kenton Lee Abstrut Win, Karinlin Touranosa", + "bbox": [ + 419, + 250, + 578, + 265 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Cryostatnet as Transformer. Unlike recnec mean, BERT is designed by pre-train deep bidirectional representations from nonshaded text by jasily selfconforming on both text 1st&4, xavier coint eorect, tate 1st&2 to win 1st, sacp0-twincent BERT model can be fine mused with just one additional output layer to create of dafve for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.", + "bbox": [ + 416, + 265, + 588, + 320 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "BERT is conceptually simple and empirically powerful. It obtains now state-of-the-art results on eleven evertweaven multimoment language processing tasks, including GLUE score to $80.5\\%$ (7.69 absolute improvement), pushing the GLUE alloselect improvement). MulaNLI accuracy to $\\varepsilon_{\\mathrm{M}} = 0.1$ , $\\nu_{\\mathrm{L}} < \\varepsilon_{\\mathrm{M}} / \\varepsilon_{\\mathrm{M}}$ (1.5 point absolute improvement) and SQoAD v2.0 Test F1 to 83.1 (3.1 point absolute improvement).", + "bbox": [ + 416, + 327, + 589, + 375 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 437, + 410, + 566, + 424 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "BERT: Pre-training on Lepi Bidellar Tansson Translons for Language Understond mderance litting' cetting from t cowf henvaming", + "text_level": 1, + "bbox": [ + 620, + 229, + 823, + 247 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Author, List:", + "J Asbad Devlin, Yiw Changuaguagaa Kionn age rspgectangane cans \n $^{a}$ pressin liKistcn-Toutanfa", + "represeons-Uintanlvania" + ], + "bbox": [ + 620, + 250, + 820, + 273 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A", + "bbox": [ + 620, + 275, + 647, + 282 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We introduce a new languagegretrovetercendentiale monoclin klonstionist monole conBldfecarstaadss reprenters from nemer raje Sfiflnonanecones. desessnissrall ranauagaleafdelfe xyn unming on hnlaesaeare two ploddes also the por-entant canteletory a state-vwraon one-on-one coffice of anisotropy, and the ploso-syntropic colective of states. Of s2s212310 pain or questionbmporansuansluangene Tcnf?f to ingest ingf Sf10 tto 46 w. 
(I, test),marshersonalizne imnance immmens", + "bbox": [ + 620, + 286, + 834, + 328 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "BERT is conceptually imlilienenarplenpholcft-nu surate-fine-ams", + "ronen 1 oonranaalwauanu viipopluoteforocnirandinns inget caught anourage, vovlurvulgina for nain. 2004. The use of the word \"sulfur\" in the text is a question envoing SUIF u697.", + "- GBFEscors/aanoreqquasurf and Squad w.10 aninlvalte 83.7% 4.6% (X) \n- TeST fto onop:11.3% (x)", + "→ BERT is cponconuynlyrsnaintally pocefine-at-ut: ouvah176. (JET v.c.37% quinting anguen linyuH-aCLS sccorts onoonssthe sonea 4000A/AresoVc LEAU pioiHcB: gnrmaeh an epesrourinans A7c)0v o.o35 aed 1170" + ], + "bbox": [ + 620, + 338, + 836, + 402 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Playground-v3", + "bbox": [ + 676, + 409, + 785, + 426 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Input Text:", + "text_level": 1, + "bbox": [ + 161, + 440, + 258, + 455 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Generate A realistic screenshot of the first page of the Paper from the following information:", + "bbox": [ + 158, + 457, + 789, + 491 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Title: BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", + "bbox": [ + 158, + 494, + 733, + 532 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Author List: Jacob Devlin, Ming-Wei Chang, Kenton Lee, Kristina Toutanova \nAbstract: We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.", + "bbox": [ + 158, + 534, + 834, + 700 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to $80.5\\%$ (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).", + "bbox": [ + 158, + 704, + 834, + 797 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 9: Task: Document image generation. The Setup and Observations are the same as Fig. 
8.", + "bbox": [ + 176, + 837, + 816, + 853 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/9c2265499ec9f6a0c6e61a8914311c8b94d20424cd7a53a126ce5f30f3f6bf33.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 152, + 354, + 172 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Evaluation: Text Rendering Precision.", + "text_level": 1, + "bbox": [ + 357, + 157, + 650, + 174 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "You Only Look Once: Unified, Real-Time Object Detection", + "text_level": 1, + "bbox": [ + 169, + 212, + 344, + 250 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Joseph Redmon, Santosh Divvala, Ross Girshick Ali Farhadi", + "bbox": [ + 174, + 258, + 359, + 273 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 250, + 285, + 284, + 292 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We present YOLO, a new approach to object detection. Prior work on object detection repurposes classifiers to perform detection. Instead, we frame object detection as a regression problem to spatially separated bounding boxes and associated class probabilities. A single neural network predicts bounding boxes and class probabilities directly from full images in one evaluation. Since the whole detection pipeline is a single network, it can be optimized end-to-end directly on detection performance.", + "bbox": [ + 166, + 292, + 375, + 345 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Our unified architecture is extremely fast. Our base YOLO model processes images in real-time at 45 frames per second. A smaller version of the network. Fast YOLO, processes an astounding 155 frames per second while still achieving double the mAP of other real-time detectors. Compared to state-of-the-art detection systems YOLO makes more localization errors but is far less likely to pred", + "bbox": [ + 166, + 345, + 375, + 387 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "You Only Look Once: Unlimited, Time/Indirect Decciption \nAuthor:Lesser Jowesh Redmonial Siptedthi Adri Farhad", + "bbox": [ + 415, + 215, + 580, + 233 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We present YJLO, a new approach to object detection. Driver search uses oleejeon cieeuses to represen or correct deferential finwication. Istegedel, when farstial, we confirmed anapase, agate trius to a signafal or sialipal staphylococcal preprobed boing boos summarilyour heartbodn frontal nater and the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the", + "bbox": [ + 413, + 234, + 622, + 258 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Acatat Rechun-Yorim Bogae lo rojctc filocly", + "bbox": [ + 413, + 260, + 496, + 265 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Our undersonged tandoce boe + ant - or cemoe - aepocemis in a times, bodingly narmabogus haarban. Jnci enwecstic ancoontie fluy fast YOLO pue moebes, 45f rans perennetioles unperenentio- dorensis in reel thane reobexes petarges. ectcylcfom oene trsoute of sucrose princeia of docuta. d. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
..", + "bbox": [ + 413, + 268, + 622, + 295 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/5789a11c88dd67fe3ecf18424e18808df9be15ba558dca4d60893646f7ed02f2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Abstr. LOC nomencl.
FascicristinFas1/3
MADS-eos-miR-fc::rec_mucmuc
fli-2 (C9-03)fli-25p438%
#868E1 (b-cd)#868ap33<%
#868E1 (b-cd, c-d)#868ap33<%
#868E1 (b-cd, d-c)#868ap33<%
#868E1 (b-cd, e-c)#868ap33<%
#868E1 (b-cd, f-c)#868ap33<%
#868E1 (b-cd, g-c)#868ap33<%
#868E1 (b-cd, h-c)#868ap33<%
#868E1 (b-cd, i-c)#868ap33<%
#868E1 (b-cd, j-c)#868ap33<%
", + "bbox": [ + 416, + 297, + 627, + 344 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "aAldofoi, aS; atalfoi: all other anatolian oboforators of otects. 1 oxtinct nands, aHs: all extinctions, for foxtroomes sereis (outcited) I am Alstomos, Rictus sp. to theft, to rott and aftall to sell otles, or licee, and so on.", + "bbox": [ + 413, + 345, + 622, + 359 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "```bash\n#renatIdack Corrgend to state-vtextction system to objects. CN in All and Chon\nPectmon Is dendrites into Commute on ooclastin or Donr Tnp, eutment the\nimagery mod fringes to arf the Articn on A mitronin or aortothetotnoid\nned\n#recognition VOLCOVOLD, Cogenture: GcGmCunlty: VOLCOVOLD", + "bbox": [ + 413, + 364, + 622, + 388 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 241, + 396, + 299, + 410 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 450, + 397, + 578, + 411 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "You Only Look Once: Unified, Real-Time Object Detection", + "text_level": 1, + "bbox": [ + 648, + 199, + 802, + 219 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Joseph Redmon, Santosh Dlwala, Ross Girishk, Ali Farhedi Abstract", + "bbox": [ + 648, + 220, + 803, + 233 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We presentYOLO, a new approach to object detection. Prior work on object detection epires classifiers to perform detection. Instead, we frame object detection as a regression problem to spatially separated bounding beves and associated class probabilities. A single neural network, predicts brednis bounding boxes and class reliabilities directly from full evaluation. Since the whole detection pipeline is a singkwork, it can be optimised end-to-end directly on detection performance.", + "bbox": [ + 648, + 234, + 834, + 284 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Our unified architecture is extremely fast. Our base VOLO model precursors images in real-time at 45 frames per second. A smaller version of trieva, Faat VOLO, processes an aetounding 155 frames per second. ¥8 frames per second while clll achieving double the MAP of real-time detectors. Compared is sisl-site detection systems VOLO makes mark deterrms. VOLO makes more lecdiscipli predict fasse ertris to ter en ptiplicit false detections where nothing exists. 
Finally, VOLO, VOLO lesrs vs every revalur representations of objects all other detection methods, including DPN and R-CNN, by a wide when generalizing from natural images to artwork artwork on both on the Picasso Dataset and the People-Art Dataset.", + "bbox": [ + 648, + 290, + 828, + 367 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Playground-v3", + "bbox": [ + 687, + 395, + 797, + 411 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Input Text:", + "text_level": 1, + "bbox": [ + 161, + 426, + 258, + 441 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "\"Generate A realistic screenshot of the first page of the Paper from the following information:", + "bbox": [ + 158, + 444, + 794, + 477 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Title: You Only Look Once: Unified, Real-Time Object Detection", + "bbox": [ + 158, + 482, + 653, + 500 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Author List: Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi", + "bbox": [ + 158, + 501, + 715, + 517 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Abstract: We present YOLO, a new approach to object detection. Prior work on object detection repurposes classifiers to perform detection. Instead, we frame object detection as a regression problem to spatially separated bounding boxes and associated class probabilities. A single neural network predicts bounding boxes and class probabilities directly from full images in one evaluation. Since the whole detection pipeline is a single network, it can be optimized end-to-end directly on detection performance.", + "bbox": [ + 158, + 520, + 831, + 648 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Our unified architecture is extremely fast. Our base YOLO model processes images in real-time at 45 frames per second. A smaller version of the network, Fast YOLO, processes an astounding 155 frames per second while still achieving double the mAP of other real-time detectors. Compared to state-of-the-art detection systems, YOLO makes more localization errors but is far less likely to predict false detections where nothing exists. Finally, YOLO learns very general representations of objects. It outperforms all other detection methods, including DPM and R-CNN, by a wide margin when generalizing from natural images to artwork on both the Picasso Dataset and the People-Art Dataset.\"", + "bbox": [ + 158, + 652, + 823, + 820 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Figure 10: Task: Document image generation. The Setup and Observations are the same as Fig. 8.", + "bbox": [ + 171, + 859, + 821, + 876 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Document Image Generation", + "bbox": [ + 169, + 127, + 390, + 143 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "2.1.4 Panorama Image Generation", + "text_level": 1, + "bbox": [ + 127, + 90, + 383, + 106 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Panorama image generation aims at creating a 360-degree view of a static scene, enabling immersive and comprehensive visual experiences. In our experiments, we select Pano-SD [119] and Gemini 2.0 Flash [99] as the baselines, with representative results illustrated in Figure 11. 
The comparisons reveal that while the baseline models can generate coherent panorama-like images with seamlessly connectable left and right sides, GPT-4o struggles to produce a true panorama. In most cases, GPT-4o generates images that approximate a panoramic view but still fall short in ensuring the necessary continuity across the image boundaries. We attribute this limitation to the insufficient representation of panorama images in its training data, as well as a predisposition towards generating images with a higher vertical aspect ratio rather than a wider one. Consequently, in the realm of panorama image generation, GPT-4o is inferior to the existing baseline models.", + "bbox": [ + 125, + 114, + 872, + 241 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Panorama Image Generation", + "text_level": 1, + "bbox": [ + 183, + 263, + 403, + 280 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Evaluation: Is panorama image?", + "text_level": 1, + "bbox": [ + 357, + 290, + 640, + 309 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/1371bb3d27b320f551f1aebcf4ba83faae804c2369cc85b124cb8a6b9e8b9ec5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 151, + 315, + 336, + 411 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/470abd7050918de412b06326cdd51c4cece55e61b2888569447852d1b87f591d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 316, + 591, + 411 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/56f24172af8797ef15310df874ee2c02e4edcdae14624400e6196e1a2638ef82.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 316, + 846, + 411 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Input Text: \"Please generate a panorama image: A living room with hardwork floors, a fireplace, and large windows.\"", + "bbox": [ + 158, + 416, + 839, + 446 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/0771e85cb3485940ea65217af54563edb5368d1ed7f290c2f9c5ad8bdccee8ee.jpg", + "image_caption": [ + "Figure 11: Task: Panorama image generation, aiming to create immersive 360-degree views of static scenes. Setup: We compare GPT-4o with established baselines such as Pano-SD [119] and Gemini 2.0 Flash [99] to evaluate the generation of coherent panoramic images. Observations: While the baseline models reliably produce panoramas with seamlessly connected left and right sides, GPT-4o tends to only approximate a panoramic view and struggles to maintain continuity across image boundaries. This shortfall is likely due to limited panorama image representation in its training data and a tendency to generate images with a higher vertical aspect ratio rather than a wider one, rendering it inferior to the baselines in this task." 
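+ , "Aside (ours, illustrative; not the paper's evaluation protocol): the 'Is panorama image?' check above can be approximated programmatically, since a true 360-degree panorama should wrap around so that its leftmost and rightmost pixel strips nearly match; the helper name seam_gap and the strip width are assumptions.\n```python\nimport numpy as np\n\ndef seam_gap(pano, w=4):\n    # pano: H x W x 3 uint8 array; lower values suggest the image\n    # can be wrapped into a seamless 360-degree view\n    left = pano[:, :w].astype(np.float32)\n    right = pano[:, -w:].astype(np.float32)\n    return float(np.abs(left - right).mean())\n```"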
+ ], + "image_footnote": [], + "bbox": [ + 151, + 450, + 336, + 546 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/d163c0b8a05c2d83cbe87b0c94311aadf8a7505417a3639fcd47509e36995332.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 450, + 589, + 546 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/eba5be0e2a33e5791d2df2eae6ba493b1daf01dd71d6fba92adff187a344d471.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 596, + 450, + 846, + 546 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Input Text: \"Please generate a panorama image: A cozy study with built-in bookshelves and a leather.\"", + "bbox": [ + 158, + 550, + 818, + 579 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/f735b915cfa2ba7a85ad7998eea453cc54fd14b2e904d305b4cea528c2185f2a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 151, + 585, + 333, + 680 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/05f5665f542cca655067ebe6c202394a79f20e88be3af6125a418ad1ddb38552.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 585, + 589, + 680 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/ab89160a98b1c9923175245ecb1b778b6c8bedaa2a414c4e3a82178ef3554b45.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 596, + 585, + 846, + 680 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Input Text: \"Please generate a panorama image: A bedroom with a ceiling fan, gray walls, hardwood floors, a bed, and a TV on the wall.\"", + "bbox": [ + 158, + 685, + 789, + 715 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 214, + 723, + 269, + 737 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 403, + 723, + 522, + 737 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Pano-SD", + "bbox": [ + 687, + 723, + 751, + 737 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "2.2 Image-to-Image Tasks", + "text_level": 1, + "bbox": [ + 127, + 90, + 323, + 104 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "2.2.1 Style Transfer", + "text_level": 1, + "bbox": [ + 127, + 116, + 282, + 131 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Style transfer is a classic yet evolving task in computer vision, aiming to render an image in a specific artistic style while preserving the original content. It bridges the domains of vision and art, enabling applications such as digital artwork creation, film post-production, and virtual reality environment design. Early approach [33] used convolutional neural networks to separate and recombine content and style representations from images. This seminal work enabled the artistic stylization of photographs by optimizing pixel values to match a desired style. To improve efficiency, Johnson et al. [47] proposed feed-forward networks for real-time style transfer using perceptual losses. Later methods such as AdaIN [43] and WCT [57] enabled arbitrary style transfer without retraining for each new style. Transformer-based models like StyTr² [23] have been introduced to enhance style transfer quality and better preserve structural details. 
More recently, with the rapid development of image synthesis techniques, especially diffusion models, style transfer has seen further advancements in both quality and controllability. However, transferring specific artistic styles still typically requires a non-trivial amount of training data.", + "bbox": [ + 124, + 140, + 870, + 294 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To comprehensively evaluate the style transfer capability of GPT-4o, we conduct comparisons against several recent competitive models, including Gemini 2.0 Flash [99] and Midjourney v6.1 [75]. Specifically, Figure 12 illustrates style transfer results for natural scenes, while Figure 13 focuses on human facial images. Across a diverse range of styles, such as Monet, Van Gogh, Pixar, Cyberpunk, Snoopy, Disney, Ghibli, and Cubism, GPT-4o demonstrates consistently superior performance in both stylistic fidelity and content preservation.", + "bbox": [ + 124, + 297, + 870, + 369 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Notably, in the case of Ghibli style transfer, GPT-4o exhibits remarkable fidelity to the original artistic aesthetics, closely resembling the target style with vivid color palettes and soft contours. In contrast, both Gemini and Midjourney often produce inconsistent visual styles and textures. Furthermore, GPT-4o excels at preserving fine-grained content details, such as facial structure, earrings, clothing, and hairstyles, which are often misrepresented or lost in the outputs of other models. These results suggest that GPT-4o not only captures high-level style semantics but also maintains strong spatial consistency and semantic alignment.", + "bbox": [ + 124, + 373, + 870, + 459 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Prompted Stylization", + "text_level": 1, + "bbox": [ + 161, + 210, + 328, + 226 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Evaluation: Consistency/style.", + "text_level": 1, + "bbox": [ + 362, + 223, + 622, + 242 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/0fe2326eaaff906398f84779d9f2f8a9b656dcf843adbe8b635b0ebccc3b64c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 153, + 247, + 321, + 334 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/41dc891dc5a7c3692e3d168e6b8d7aeecf6ee926ceb54032fac4c7482b335993.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 247, + 500, + 332 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/275a5a4359f968af20b14db343e03cc70f52a75880ac223fb413656701f9803b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 246, + 681, + 332 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/87edc272818725170e7b6db73459109f3c3967fc70ae14b1e2acffc417d4290d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 683, + 246, + 849, + 332 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Input Text: \"Generate the Monet style of this picture.\"", + "bbox": [ + 305, + 334, + 699, + 348 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/750636006f7b07b0af11afa452e7037a2d9a06614ababcd014be1498c416fd65.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 153, + 349, + 321, + 436 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": 
"images/1ccf7d1613f12e0b81fba30f86e01357733d489b633304f1597188ed4f258e67.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 349, + 500, + 435 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/a8a15f935780009ae4408165cc5d5abf2965efa5cead6b31539ab6f6570babf3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 349, + 681, + 435 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/577ab214cdb8de2d03a7a2be9b878ed69afb604d28c0d21381d8da756093e973.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 683, + 349, + 849, + 435 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Input Text: \"Generate the Van Gogh style of this picture.\"", + "bbox": [ + 302, + 438, + 718, + 450 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/c464a9f6f3f24ab0f65f5488c132ccea546a4af9ede651088ae6e7a2961516ec.jpg", + "image_caption": [ + "Figure 12: Task: Style transfer, aiming to render an image in a specific artistic style while preserving the original content. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and Midjourney v6.1 [75] on natural scene style transfer across multiple artistic domains. Observations: GPT-4o exhibits significantly better content preservation compared to Midjourney v6.1, maintaining fine-grained content details and structural consistency. In terms of style, it faithfully adheres to the textual description, effectively rendering vivid color palettes and soft contours that characterize the target style. This alignment notably surpasses both Gemini 2.0 Flash and Midjourney v6.1, highlighting GPT-4o's strong capabilities in preserving content and faithfully rendering diverse styles." + ], + "image_footnote": [], + "bbox": [ + 153, + 453, + 321, + 539 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/67dd368a9fc83144d81f03926e3e616fb1c6241004b301cf8c36f99e0c45dd8d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 453, + 500, + 537 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/badd5fb657f99aaab26d4d7e7efa36a6fb517d65faf566ed3fe8c5ae8f6f35d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 453, + 681, + 536 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/598045ae955aa43906186813c0efdb19e703aeb859e6842ca9badf105529c011.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 683, + 453, + 848, + 536 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Input Text: \"Generate the Pixar style of this picture.\"", + "bbox": [ + 307, + 539, + 692, + 553 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/f68ac7b3f9c3b4527403386e94ea70946346ec05ae2dde329bd666632c16d005.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 153, + 555, + 321, + 642 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/de2131f5c3c16b1cda2aa0dec3ae6fe0689fffc4267105db8583bef2e64b0cad.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 555, + 500, + 640 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/a85c872e242b8a76e3a8010c80983519f0fb6a8346e50773ef2e7fab0b390d2a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 555, + 679, + 640 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/1f67f57b31e0864c333e12afce49cff3e3a9e54902f8fabbbf522d8e1b2bcb07.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 683, + 
554, + 849, + 640 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Input Text: \"Generate the Cyberpunk style of this picture.\"", + "bbox": [ + 303, + 643, + 730, + 657 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 191, + 664, + 282, + 679 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 383, + 664, + 436, + 676 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 527, + 664, + 645, + 676 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Midjourney v6.1", + "bbox": [ + 702, + 662, + 815, + 678 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Prompted Stylization", + "bbox": [ + 222, + 95, + 370, + 109 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Evaluation: Consistency/style.", + "bbox": [ + 398, + 109, + 630, + 123 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/7b58c3184e1b97ea204211d53437c27406bef28c23fb6a50586f6c0f1a5bfe24.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 212, + 125, + 346, + 246 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/4a7dfd47b3b4f45d4ea17721125d0fe632c1243e9f989c4ef96f7157ca1eac3f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 377, + 125, + 488, + 244 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/1a4e0134e2181e538cfa34d801a9a29589af46e4564af10bde2b0a48bdef3123.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 125, + 648, + 244 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/0e6d7e6671a48677bfae227082d4c8fb8b48df29d71b55960f2c402f7f628630.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 665, + 125, + 797, + 244 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Input Text: \"Generate the Simpsons style of this picture.\"", + "bbox": [ + 328, + 246, + 694, + 258 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/9e5e6d93ce20c260e565400b12fea89bd8bc529187af868dd712070ab1c328f8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 212, + 258, + 346, + 378 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/8e5d8cc638f17208ad4fb443ff0683574d425e64d854400f82678e0227eca37e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 377, + 258, + 486, + 378 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/aa5d8bd6655c3924c3860521fa7d3aabfae95679b8638674f96b892057031bdf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 258, + 650, + 378 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/0459972536685fbb8896000173723ed46bcf8309ab5dd8cd3867ab99d79da23a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 258, + 795, + 378 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Input Text: \"Generate the Snoopy style of this picture.\"", + "bbox": [ + 333, + 381, + 687, + 393 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/70346a54c2888c50ce4473234e1e5a51c19e9b3df375364d24bb2a98a5a7153a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 212, + 393, + 346, + 513 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/959c69e3cc69906dbb804b63c7b374843496df51ef6f1569b100b6937422b431.jpg", + "image_caption": [], + 
"image_footnote": [], + "bbox": [ + 377, + 393, + 486, + 513 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/fc8ea4a8390ad6f9e57e8f5aab23790b3bd6070cec727042772246ae96706efb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 393, + 650, + 513 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/4c59a95a5b0ba1c9ed0c407b8724156fd81dd1c21744e6e8075190bae52e10fd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 393, + 799, + 513 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Input Text:\"Generate the Disney style of this picture.\"", + "bbox": [ + 333, + 515, + 683, + 527 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/4dbac1245c6ad15005db4a1d3ccb5720d30e6be34a344f45e008b6f608fc4cb6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 212, + 527, + 346, + 647 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/c9580af54f8032d511fbb97fa1439c5960b355c6d89a6da9548e03d6da674129.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 377, + 527, + 486, + 646 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/a24d3f139726d7234b378db7110157186e07c2e355a0860dbbefa990e5431c62.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 527, + 650, + 646 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/1c79b6d712c77204bf93747fafc74f99e0ed9cd9216c159ddbe9f67834fa0ee5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 527, + 797, + 646 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Input Text: \"Generate the Ghibli style of this picture.\"", + "bbox": [ + 336, + 648, + 679, + 660 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/899f06b2c3cf50b2d39323fff3222f827df1c1eb152fa1ed8c87460c8158613c.jpg", + "image_caption": [ + "Figure 13: Task: Style transfer, aiming to render an image in a specific artistic style while preserving the original content. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and Midjourney v6.1 [75] on human face style transfer across multiple artistic domains. Observations: GPT-4o exhibits significantly better content preservation compared to Gemini 2.0 Flash and Midjourney v6.1, maintaining fine-grained content details and structural consistency. In terms of style, it faithfully adheres to the textual description, effectively rendering vivid color palettes and soft contours that characterize the target style. This alignment notably surpasses both Gemini 2.0 Flash and Midjourney v6.1 far away, highlighting GPT-4o's strong capabilities in preserving content and faithfully rendering diverse styles." 
+ ], + "image_footnote": [], + "bbox": [ + 212, + 660, + 346, + 780 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/eae0730d55066371b3326e9ed3ab31f01ca11d2267e31b1a8c17fc7663f2714f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 379, + 660, + 486, + 779 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/d7a7f1fc31d3a9490433f88485c2a950c61d2d98019f2ef1f9666ccb7987ad48.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 660, + 650, + 779 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/c65cc8644013933cd8a0d822cb884ca3b9ab0b6f9e149296ab44724e475fa681.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 661, + 799, + 779 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 243, + 790, + 323, + 803 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Input Text: \"Generate the Cubism style of this picture.\"", + "bbox": [ + 334, + 780, + 687, + 791 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 408, + 792, + 454, + 803 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 531, + 792, + 632, + 803 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Midjourney v6.1", + "bbox": [ + 686, + 791, + 785, + 804 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "2.2.2 Image Editing", + "text_level": 1, + "bbox": [ + 127, + 90, + 282, + 107 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Image editing involves modifying the visual elements, composition, or data of an image to achieve a desired outcome. This process can range from minor refinements to significant alterations, while maintaining the integrity of the original image. Over time, image editing techniques have evolved from manual, labor-intensive methods to sophisticated AI-driven approaches. Prior works [10, 30, 9, 120, 5, 29, 4, 40] have demonstrated the ability to perform various editing tasks based on textual instructions, such as adding, removing, or replacing objects; altering backgrounds, colors, or styles; and adjusting the number, size, or positions of objects. However, these models still exhibit limitations in certain scenarios, particularly in preserving non-edited regions, maintaining consistent image characteristics, and ensuring seamless blending between edited and non-edited areas.", + "bbox": [ + 124, + 114, + 870, + 227 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We compare GPT-4o with MGIE [30], LEDs++ [9], MagicBrush [120], and Gemini 2.0 Flash [99], which are representative of current SOTA methods. These experiments evaluate GPT-4o's subject preservation and instruction-following capabilities to determine its effectiveness compared with existing methods. Comparative results are shown in Figure 14 through Figure 19. We find that GPT-4o achieves performance comparable to, and in many cases surpassing, SOTA baselines in image editing tasks. From these examples, GPT-4o exhibits the fewest failure cases, demonstrating a strong generalization ability across a wide variety of editing tasks. It consistently outperforms baseline models across multiple editing scenarios. 
We highlight several key observations:", + "bbox": [ + 124, + 231, + 870, + 332 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Strengths of GPT-4o in image editing:", + "bbox": [ + 171, + 340, + 454, + 357 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Fine-grained editing: GPT-4o shows a superior ability to handle fine-grained editing tasks. For instance, in example 2 of Figure 14 and example 1 of Figure 15, GPT-4o successfully modified small, detailed objects such as a toothpick and pink ballerina slippers, outperforming prior methods.", + "- Substantial image transformations: GPT-4o excels at large-scale edits, such as background changes or object transformations, while maintaining visual coherence and realism. These complex edits require robust contextual and semantic understanding. Example 1 in Figure 16 illustrates GPT-4o's effective handling of a major background alteration task.", + "- Subject preservation: GPT-4o demonstrates strong subject-preserving capabilities, avoiding common artifacts such as facial distortions or component loss. In example 2 of Figure 14, GPT-4o retains the content of a drink that Gemini 2.0 Flash erroneously altered. Similarly, in example 5 of Figure 19, GPT-4o best preserves fuselage patterns and textual markings on an airplane.", + "- Instruction and original image adherence: GPT-4o shows a notable ability to follow instructions and maintain the structure of the original image, particularly in style editing and tasks involving object quantity, size, or position. This likely stems from its advanced understanding of both the image content and the editing instructions. For example, Figure 18 demonstrates GPT-4o's capability in style translation. Example 2 in Figure 17 shows its understanding of the term \"orange\" in both textual and visual contexts. A similar ability is illustrated in example 4 of Figure 19." + ], + "bbox": [ + 200, + 359, + 867, + 603 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Limitations of GPT-4o in image editing:", + "bbox": [ + 171, + 606, + 467, + 622 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- GPT-4o underperforms in scenarios where strict preservation of the original image's lighting, shading, and color tones is required. In such cases, the edited images may exhibit noticeable shifts in visual consistency. This is evident in examples 1 and 5 of Figure 14 and example 4 of Figure 15.", + "- In some cases, GPT-4o may fail to retain image details outside the intended edit region. For instance, example 4 in Figure 14 shows a degradation in image quality in non-targeted areas." + ], + "bbox": [ + 200, + 626, + 867, + 698 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In summary, GPT-4o demonstrates substantial advancements in image editing, showing exceptional capabilities in detailed and large-scale edits, subject preservation, and adherence to instructions. 
While there are limitations in strictly maintaining original image characteristics such as lighting and tonal consistency, GPT-4o significantly reduces failure cases and outperforms existing baselines across a wide range of editing tasks, pushing the boundaries of current SOTA performance.", + "bbox": [ + 125, + 710, + 870, + 781 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/cde0ac3d92eb933391cdf6879af267478293459d8efb075cb2c215e135eaf5de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 133, + 357, + 150 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Evaluation: Instruction-following / faithful.", + "text_level": 1, + "bbox": [ + 354, + 133, + 663, + 151 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/413f16191d537cd336a6645867fb4044e2d27954d8e97a6f61443fdadd988968.jpg", + "image_caption": [ + "Input Text: \"Add a notebook to the desk.\"" + ], + "image_footnote": [], + "bbox": [ + 192, + 159, + 336, + 268 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/e6edd2726b0a0b32b22ba7d28c24eb54d68706defe2d03d01109b73577922099.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 157, + 493, + 270 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/2b5cbcdbe86ff5d8016d28bf0a81a16d30075a3c50e64b84bfd2e7a4f9dbfb54.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 157, + 650, + 268 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/9b6616b3e3e491d5719d2c2596fedf5f8396a133bfd0edae1a8a64d343c4cd8c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 159, + 805, + 268 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/e1866d261c2cafcac4f4dbe2ed648662bbfe95d442c888ae66ee515ecf40b804.jpg", + "image_caption": [ + "Input Text: \"Put a toothpick in the top of the left sandwich.\"" + ], + "image_footnote": [], + "bbox": [ + 192, + 287, + 336, + 398 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/b169d0c531726750bbaeeb2fde03b21e893573395b4260fafdf640d946589a90.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 287, + 491, + 397 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/f8056f09456f348f5dfebcc2e29caddc6eaffd73644a4b2e58c236fd4f647220.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 287, + 648, + 398 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/58100ddfbfd9cfbbcbe07188ce06d08a6990468de6ef91ce1596ca7e44f0513d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 287, + 805, + 398 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/ab999568dff4abd05c3fdb65bddbc6ea19103d123770203b58e1c0014cd42fd2.jpg", + "image_caption": [ + "Input Text: \"Change the goats into moose.\"" + ], + "image_footnote": [], + "bbox": [ + 192, + 417, + 334, + 525 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/b33cf7d522604acf3a5bd5c343b02f71b892c5585b03a7601c2a93da774f7f45.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 417, + 491, + 525 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/0f95244911c8dc29653de6ffbe04b37fc4545e698fba41f0e9c612f8ef2eea1a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 417, + 648, + 526 + ], + "page_idx": 21 
+ }, + { + "type": "image", + "img_path": "images/c13377bc306094edb2168ae5ee5698264067e0fb2c892c3a361247e6688d7d37.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 417, + 803, + 526 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/454c99016219342e261335a7e82a9c5e7a05daa487fd74aa7d33328c05c7b4b4.jpg", + "image_caption": [ + "Input Text: \"Replace potatoes with baked beans.\"" + ], + "image_footnote": [], + "bbox": [ + 192, + 545, + 334, + 656 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/2f5b1c3c3a38c0ef1182e319f7bb487e17c8dec0cf207f9ddae29bd0055c93dc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 545, + 493, + 655 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/33b1124e10743638cb062747979a788b7ede97dab752dac0e2e8cac1db1a2516.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 545, + 648, + 655 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/460d34550756a1743afe1027e9058e86963ba9586255d860b0f2b1c270b9280e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 545, + 805, + 656 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/bc1073fb4ee6a5f6762fbba75598ee620fd0b2f89f1eb8f7b28e02c9a973045e.jpg", + "image_caption": [ + "Input Text: \"Change the fire hydrant to a parking meter.\"", + "Input Image", + "Figure 14: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: GPT-4o achieves higher success rates than MGIE (examples 2/5) but occasionally alters unintended elements (bread in example 4) or lighting/shading structures (example 5). This likely stems from stronger generalization capacity and creative adaptation focus in training, though reduced fidelity suggests insufficient constraints on structural details during fine-tuning." 
+ ], + "image_footnote": [], + "bbox": [ + 192, + 674, + 336, + 782 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/448a4c7fafc6c14e57593c609ff7476f3fc55ea22624bc38b74099fe39fc507a.jpg", + "image_caption": [ + "GPT-4o" + ], + "image_footnote": [], + "bbox": [ + 349, + 674, + 493, + 782 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/c5822478e8f8995312ffe08cd952a629abcc29449b18ab827236ad66d1633b87.jpg", + "image_caption": [ + "Gemini 2.0 Flash" + ], + "image_footnote": [], + "bbox": [ + 506, + 674, + 650, + 782 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/daa8c1bb0acfd6e425d089f419400308811f1212d5b44f851d12ef8b15bbe500.jpg", + "image_caption": [ + "MGIE" + ], + "image_footnote": [], + "bbox": [ + 663, + 674, + 805, + 782 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Image Editing", + "bbox": [ + 220, + 101, + 323, + 118 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Image Editing", + "text_level": 1, + "bbox": [ + 218, + 99, + 321, + 116 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/2929ca2301ebf3a6fed0fa5d79fbf4b686d60eeaac8dec6a4e0cf76ff49e665e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 126, + 354, + 142 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Evaluation: Instruction-following / faithful.", + "text_level": 1, + "bbox": [ + 354, + 128, + 661, + 145 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/93239b7986d7e146b41b57dc317de85153c1f5d166ec11db11dfd7fe2702e17c.jpg", + "image_caption": [ + "Input Text: \"Turn everyone shoes into pink ballerina slippers.\"" + ], + "image_footnote": [], + "bbox": [ + 194, + 148, + 334, + 258 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/ab95d11bd4370f4489f59f00bf95b0420cf3629bf46e660886023f3f96118d89.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 148, + 493, + 258 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/6cecd99cb28dc56f82b143d8d1bfc81a9ffb34558b21ff22a2629368564a9e5f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 508, + 148, + 651, + 258 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/c8841acf64bd37b3e001194394bdc34bdbf3ce6e360349fc571e6d6f84b0e03a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 665, + 148, + 807, + 258 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/5ef2471bae5d6ac28f80fd9606df44d177b953395bf0f4d7328ae08ca174b1ee.jpg", + "image_caption": [ + "Input Text: \"Remove the fence from in front of the horses.\"" + ], + "image_footnote": [], + "bbox": [ + 192, + 277, + 336, + 386 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/530cb7230b747fa4e841052b66f6154fe575e082392dc9ebee75fb61c0ee6728.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 277, + 495, + 385 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/1c82b96334f0b7ff5da3c6643db4451a39f618d24f04fa9a677945a841872526.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 277, + 650, + 386 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/70e41d785dd49e7ac3f98b75bebf61abc711768215fa3f8fc2105d70e0467d9d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 277, + 807, + 386 + ], + "page_idx": 
22 + }, + { + "type": "image", + "img_path": "images/2884734e16a9fec622bef494287b9ffd3440adb4d2a9f14797db74ec8bd44225.jpg", + "image_caption": [ + "Input Text: \"Remove the baby elephant in the picture.\"" + ], + "image_footnote": [], + "bbox": [ + 192, + 406, + 334, + 515 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/c4aa3522a28b388224935e752868d0c11d026b182a9b1476c099f85eecf8b27d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 406, + 493, + 515 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/6bd024f68b4e3ada55b3eb581d5cca2d29125289402827758186fcce06573946.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 508, + 405, + 653, + 513 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/2865dc07722a294ea85dfd9841947ab7e799e3d91a0f4a9092b0ff0c6fddd5ac.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 665, + 405, + 807, + 513 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/abf7f3e688bfa59a5ecc1db78f61491254ec021fedeea5c85187c6562809d61c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 194, + 531, + 336, + 641 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/625aa6127eca041e5810dc4b46c00716834bba9bb7fb4e2613067d7135aba20f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 532, + 493, + 641 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/fbfad470b3b72d3478e91210af29255466f94c98c96fb529b634e931bd2f9848.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 532, + 653, + 641 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/5447a1f82a340e52710bd0c108dac73285aab065976ccc90d3243a192c03666f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 531, + 808, + 641 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/013887f45aed224843b4df7ea2aba71451637e932d1143fbfcacbd27271f65f3.jpg", + "image_caption": [ + "Input Text: \"Change the yellow hat into a cowboy hat.\"", + "Input Text: \"Remove the people from the background.\"", + "Input Image" + ], + "image_footnote": [], + "bbox": [ + 194, + 659, + 336, + 767 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/8acbf5757555da90dab4610d0040e9baac69b0e9e18c478fba61f00f601266a2.jpg", + "image_caption": [ + "GPT-4o" + ], + "image_footnote": [], + "bbox": [ + 354, + 659, + 496, + 767 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/ef4e183b7dfd1b0ebe989bb80028076acf9467e603aef017798bb91bb385489e.jpg", + "image_caption": [ + "Gemini 2.0 Flash" + ], + "image_footnote": [], + "bbox": [ + 509, + 659, + 653, + 767 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/29d11d5b373c8c6684c115fc01b10b1a340f12dbedfc7dfb4b61051e6911c7fe.jpg", + "image_caption": [ + "MGIE", + "Figure 15: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: From examples 1-3, GPT-4o shows higher success in fine detail edits and large-scale edits with occlusions. This likely stems from GPT-4o's stronger contextual understanding and ability to infer missing or obscured elements, enabling more precise localized edits and coherent large-scale modifications even with partial visibility. However, it sometimes erases non-target elements (e.g., the house in example 5) and significantly alters global lighting (example 4)." 
+ ], + "image_footnote": [], + "bbox": [ + 665, + 659, + 807, + 768 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Image Editing", + "text_level": 1, + "bbox": [ + 220, + 99, + 323, + 116 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/a5f0857371c912d73812959c5154aa5f588c9c53b0cac8d82d05e68ed4b24ba2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 125, + 354, + 142 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Evaluation: Instruction-following / faithful.", + "text_level": 1, + "bbox": [ + 354, + 128, + 663, + 145 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7fae08c7d278ec0abfd7dfa99c03539fe6218331e03f2ae1578f75bab7ed9747.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 196, + 151, + 338, + 260 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7b6fe8843d08c99ead6fe3a01df0d1b2466ad14914ede1de9b670e85d3294614.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 150, + 495, + 258 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/18ac5988622ec524574bc3cacfb9b7b32ff24f0458ab883762dd159f18758912.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 148, + 648, + 258 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/64f0d91151654ae4516f7d303ba48c4e01706725697a79b75bb9494acae439d4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 148, + 807, + 260 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Input Text: \"Change the background to the set of a nickelodeon game show.\"", + "bbox": [ + 194, + 261, + 684, + 273 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/577dde2d7debd868c1455d80030c181d453b6b7c4530b75d54e42bb0bc034596.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 194, + 277, + 334, + 387 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/03db7b6af0e509243f783b8ce2d6a7d6f8244e6d1c3cca245f733119d0adc4d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 277, + 493, + 388 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/526a547e833ad8f409b85457a4b260fbfa64463b2a497a191035f2bdb24ac6b7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 277, + 650, + 388 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/777c4e2019957c2a3491ab6ec8e5dbcb0731c4d26a35844fe6edc39ec5266953.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 277, + 803, + 388 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Input Text: \"Have the dog prick up its ears.\"", + "bbox": [ + 192, + 390, + 482, + 404 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/4717d46544cd52b4e89e284f9ced1f798ee955d4101bcead34498865073ada80.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 192, + 411, + 333, + 515 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/f8b04a681ef9c0a1c0fb445e1ea16e42cbc375d735002dad0a66db67be113463.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 411, + 493, + 515 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/67a212cd49f8bf5728997494e42d4d92f076bc6f5c1e66b1dd42cfdcaaf6779c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 508, + 410, + 650, + 516 + 
], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/8f600bc759b57a060077f8d020e12edc5a0d30fdd50d923a4ba69ef3ab7e3b8a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 410, + 805, + 517 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Input Text: \"Have the elephant's tail raised.\"", + "bbox": [ + 194, + 517, + 488, + 529 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/3986b51693c8099322835c4b144aa50d43241eafeb2e294ac53df6b06556a42b.jpg", + "image_caption": [ + "Figure 16: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: From Example 1, GPT-4o demonstrates superior performance in style editing, effectively interpreting style instructions and preserving global image structure, a capability lacking in baseline models (MGIE, Gemini 2.0 Flash, and MagicBrush, as will be shown later). This likely stems from its stronger cross-modal comprehension and structural awareness during training." + ], + "image_footnote": [], + "bbox": [ + 194, + 532, + 334, + 643 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/f91e7c322f8dc4a9e7e58df1c8b1d035d289d156992fbfb80f834d5f06e3e4d2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 351, + 534, + 493, + 643 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/9ff188c0ba980c8e75a69ac4f116a1d53d641501e8a2976781fdf333d07f29ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 534, + 648, + 641 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/a179e4417d79d0b36fecc454a7c170f44da2165e686f30f95e1e62f5dacec7e9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 665, + 534, + 805, + 642 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Input Text: \"Change the background to Vatican City.\"", + "bbox": [ + 196, + 646, + 540, + 660 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/15fc7c5a66f48467e502347e843669f0689718ebbba95832bb0c704a6633d0b7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 194, + 664, + 338, + 771 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/285c8caa7cc67cd2c1b2e70ff31cb3ba4484255739d14c8d701b34ac2e79ec36.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 354, + 662, + 495, + 771 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/aff34b2fc3ec9bfe6b6d30adc9b2581aab5368e8efd4ea317e48bc4626796721.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 662, + 653, + 771 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/e70d73a385cda6554925808b97816d9c1e848d834e5052992231cc4954cfd336.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 662, + 805, + 771 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Input Text: \"Change the background to Mount Rainier.\"", + "bbox": [ + 196, + 773, + 550, + 787 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 223, + 797, + 305, + 811 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 398, + 797, + 447, + 810 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 526, + 797, + 632, + 810 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "MGIE", + "bbox": [ + 715, + 797, + 756, + 810 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": 
"24", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Image Editing", + "text_level": 1, + "bbox": [ + 223, + 101, + 325, + 118 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Evaluation: Instruction-following / faithful.", + "text_level": 1, + "bbox": [ + 334, + 127, + 665, + 147 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/0b872b88a7a28e69a1daae9c6423253e4db8e407800572b004fd616d814d9b24.jpg", + "image_caption": [ + "Input Text: \"Add a white hat to the woman's head.\"" + ], + "image_footnote": [], + "bbox": [ + 196, + 152, + 338, + 261 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/2c9aaa16beceac6929cab3c491e5386e4870c1879821502bc939b57f21a74ac6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 351, + 152, + 493, + 261 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/63887670dc86e68d25f445742e627570bc0bce4be5ac6001afe0c309c31c6b0a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 152, + 648, + 261 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/f1c73606a27398749d1cdb7f461d7eb75573cba818d1bde958bce44ebacb6dbd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 152, + 803, + 262 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/3fae030462e9df055665edfa8c3602ad144dba999e49b471e3dfd45d576c435a.jpg", + "image_caption": [ + "Input Text: \"Delete the oranges from the shelf in the image.\"" + ], + "image_footnote": [], + "bbox": [ + 197, + 280, + 338, + 388 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/bff611326012e121ed651fc6f65158c4598f63fd2b8ac3ecac83f8c3ed9bba15.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 354, + 280, + 495, + 388 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/40c5465d2843a6e6f41aaecc277b509f4346d85fd64000b3badbfe3d66eb079e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 280, + 651, + 388 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/784eb3154634368e8aff984cdd03ed74a47d81368a315b4c0bbe827a0051146d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 665, + 280, + 805, + 388 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/2893eb40cbf1268dab0c08335defc17fff61e8a62c9f7f88cbb40f2797ff174b.jpg", + "image_caption": [ + "Input Text: \"Get rid of the water the elephants are walking through.\"" + ], + "image_footnote": [], + "bbox": [ + 197, + 406, + 339, + 513 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/136b9b62bc4d4cdfd994c33d5283fe146823a2feb7d76000b30d2a22fbb628a3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 354, + 405, + 495, + 513 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/48b98ec70c96ab029b0a162958ffc7baf258a979ab48209783328fd17cc0c608.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 405, + 650, + 513 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/0aa8c1c419160769a182de4039193ca181bf320312a187d89844da7d1bcd55dc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 405, + 803, + 513 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 225, + 530, + 305, + 544 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 397, + 530, + 446, + 542 + ], + "page_idx": 24 + }, + { 
+ "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 524, + 530, + 629, + 542 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "LEDITS++", + "bbox": [ + 697, + 530, + 764, + 542 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/d4b9f29b0ab478363d67fd9b4ba47dd4d96e7162137bd71d2ff0b6ad6599db77.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 196, + 550, + 336, + 656 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/1d87a3ecf1aa5cd61e4772d303263ffdce0edea148e97f37a796904330cd58ee.jpg", + "image_caption": [ + "Input Text: \"Show the seal raising its head.\"" + ], + "image_footnote": [], + "bbox": [ + 352, + 550, + 491, + 657 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/69763d65a9a155bb94fbc23e0c07823b4fbfe54ae0150f8bf1838f34853a3bce.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 550, + 648, + 657 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/c55a85d62ceb01b432fd7559421cb62d05bd7e42574b82a7254f0ddc252a5fb1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 550, + 803, + 657 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/c8b6d0e8d66c6539cd6cfb4284248840031b6937e228c7f897330acc92dadf26.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 196, + 676, + 334, + 784 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/f206c5532d96bfcc1cf723bce40c254819c6dcc8f7c3dab25bd20471924a9c5d.jpg", + "image_caption": [ + "Input Text: \"Change the sky to stars at night.\"", + "Figure 17: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/LEDITS++ [9]/MagicBrush [120]. Observations: From Examples 2 and 3, GPT-4o demonstrates stronger comprehension of instructions involving 'the oranges on the shelf' and 'the water the elephants are walking through', translating this understanding into more accurate edits. This suggests better grounding of textual prompts in visual context during generation." + ], + "image_footnote": [], + "bbox": [ + 352, + 676, + 491, + 785 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/f0852ac207fe5ce1fa4118aefca299ece81ad07b3b1dabb16edd46a65c5a542c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 676, + 648, + 784 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/4f92cfa09792f7a936729795c4faf6f6742488baef7a2394db6b9a5dc3b73e04.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 676, + 803, + 784 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 227, + 801, + 308, + 815 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 398, + 801, + 449, + 813 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 526, + 801, + 632, + 813 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "MagicBrush", + "bbox": [ + 697, + 801, + 772, + 815 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Image Editing", + "bbox": [ + 220, + 108, + 320, + 125 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/8b47755995c00dd9ee49f071a5906d93f0c148923ccaa69fb15f0c73140fc71c.jpg", + "image_caption": [ + "Evaluation: Instruction-following / faithful." 
+ ], + "image_footnote": [], + "bbox": [ + 331, + 135, + 354, + 152 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/f7744cbca6f65d6b7b76f3ed15020d3fc9b6f8ed98f636028e5ced70384c52bd.jpg", + "image_caption": [ + "Input Text: \"Change the image to a 1950s Flintstones cartoon art style.\"" + ], + "image_footnote": [], + "bbox": [ + 192, + 162, + 331, + 271 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/c667c273e0d72e65f57befa30d7eac7de39e3dd5500f8b8982bf964a2af0bfd0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 162, + 493, + 271 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/a52dd1eb6b418ed5ae687d895e95de05feea7dd01a9e70453d3ef0959530bc3a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 162, + 650, + 271 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/58dcd2fc462e78a9dc9f7d7d52dd5084aa6aafb0d48039e3f826d0102bca067b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 164, + 803, + 271 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/55dc8086726bc727505f262393f9f58520886c8655e9a928d6c1c6186955bbfd.jpg", + "image_caption": [ + "Input Text: \"Change this into a cubist painting.\"" + ], + "image_footnote": [], + "bbox": [ + 191, + 291, + 333, + 402 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/d6c8b55a284c54268e71caa1c9af85186c073992a08e7df944a4827df416126b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 291, + 493, + 402 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/208b4450c1e449eb5c1aec21ac1e69f0930c6ad52d64be0770fb28058b41a6de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 291, + 648, + 401 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/c7a9ad7f6f46ee29f8dc6c06aea221cccb760335b906b633f9630413215ff700.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 665, + 291, + 807, + 401 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/28ce32f62ab276444f6b71d92bf45eedf3ce7200dfed382d7460f96631f73071.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 192, + 422, + 334, + 531 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/662a88ccc330476d2b5521ea30b676e65a0af1c7ce47b24527e5328cb5d23989.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 422, + 491, + 530 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/4f6fc9b2404193b922604fdb04aa7ba553fa2808adea7ae850000fdbcf1459d2.jpg", + "image_caption": [ + "Input Text: \"Make the image appear as if it's a woodblock print by Hokusai.\"" + ], + "image_footnote": [], + "bbox": [ + 509, + 422, + 651, + 532 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/3ae48117c4aba996216a7bc491ef96a7ef76812eb31cfce77a2b900f543504dc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 665, + 421, + 807, + 532 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/e58eec2c9254e2da8414f358c4e5148a84d45f6a54957dab8cb6f162f5233635.jpg", + "image_caption": [ + "Input Text: \"Change the background to Fushimi Inari Taisha.\"" + ], + "image_footnote": [], + "bbox": [ + 192, + 550, + 334, + 652 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/cca5f714390c8193eca135b0bd9a7e4da0d407fca7b6a93aba9022833c487d0b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 
550, + 493, + 652 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/742bbe6bc7ddca81a49887d3b626885d9ffe8e90e72e044edaaa388cf541ef05.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 550, + 651, + 652 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/2c704075d95d45fe4f0f7a7b678bfc45ee73af1834623f4fe6e02497499106c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 551, + 805, + 652 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/7397b99d3dbd4f1f9b6ee30e701e62c0f7e12919bfa41da478790936477b2c34.jpg", + "image_caption": [ + "Input Text: \"Make the image appear like a Rembrandt painting.\"", + "Figure 18: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MagicBrush [120]. Observations: This set of examples further demonstrates GPT-4o's robust capabilities in style editing and background modification, consistent with the findings previously presented in Figure 16." + ], + "image_footnote": [], + "bbox": [ + 192, + 671, + 333, + 781 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/f4667249d38b5a17f284e530b43ccc19dae4b4df74a383b000d041b25e9cc575.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 672, + 491, + 780 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/a0db75620e3e1969b865f1e6ae4a41bb07983621337b59c87edc39c85287a2dc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 672, + 648, + 779 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/5f20c93306fe70d43acd29c720c491e8d5d808966bbaa918395e1a87b2755d64.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 665, + 672, + 805, + 780 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 220, + 805, + 303, + 819 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 395, + 805, + 444, + 816 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 524, + 805, + 630, + 816 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "MagicBrush", + "bbox": [ + 696, + 805, + 772, + 818 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Image Editing", + "text_level": 1, + "bbox": [ + 218, + 107, + 320, + 123 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/fd02f523b32582d773f60da26889163fcfeb85e2ad61c1cbbf337661f949aa5b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 330, + 132, + 352, + 148 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Evaluation: Instruction-following / faithful.", + "text_level": 1, + "bbox": [ + 352, + 133, + 661, + 150 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/cdc522f7841c2077f741a6f72a86ede9c15c561e6b84d17afd15ae44085b29cf.jpg", + "image_caption": [ + "Input Text: \"Make the image look like a cartoon.\"" + ], + "image_footnote": [], + "bbox": [ + 189, + 160, + 331, + 268 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/d89d446208b65160e03ce4ced290520055aa6e2461f4822a704190db34fdc06c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 346, + 160, + 488, + 268 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/ae6de595371f09d4b568904e9ac0c1c094ae221ff86659caaecc10d0524ac9df.jpg", + 
"image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 161, + 648, + 268 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/bb239bc12ad173112db53edc8c9ee898e6ae74bcd56f4a9d4d2f4e2aa118d2f3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 161, + 803, + 268 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/777ec73ae37e58c0b5c2111e81cad23a6a071d747be202d05b8e482979d29b98.jpg", + "image_caption": [ + "Input Text: \"Change the bike frame to be shiny metal instead of red.\"" + ], + "image_footnote": [], + "bbox": [ + 192, + 292, + 331, + 398 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/e4187d7ed6a1b514ec5676db3d2003f937bfa937564597d410716eae507bb1ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 292, + 490, + 398 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/eb16bd445147255cf3980142ad981069b415244e78b0a5cc390ff3c9e7af61b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 292, + 648, + 398 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/7b45aa773deb1657966b4ed79bd2b961c599f2a4ba279796de04e8ddce076662.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 292, + 803, + 400 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/6a5555b889ead9b7c54a679f8554efb5478d55c2bbab8a2783cd24e9f0e28abb.jpg", + "image_caption": [ + "Input Text: \"Change the table color from blue to black.\"" + ], + "image_footnote": [], + "bbox": [ + 194, + 425, + 334, + 531 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/c86c4b988057b5e5ca08b92e4c09d8111a097f908d27efcd8da7c2ac29c5583a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 356, + 425, + 495, + 531 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/fafbcde737a7a7fc8337bc987984a76a03a10ed89b37798dab11796bc6e03f52.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 425, + 650, + 531 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/d02f5e582a519b9d44e58b3ed2a49efbf23380fc85233c22949e12e735fc0378.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 425, + 805, + 531 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/ee337d58007e700256aa61fb4d68f998f7a15c4a2662d8db4f8d622da1b1e1fb.jpg", + "image_caption": [ + "Input Text: \"Change the woman's hair to be all blue.\"" + ], + "image_footnote": [], + "bbox": [ + 196, + 554, + 336, + 656 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/c9364ac74a276a68b6a4096cb5d76160b136e7879dc4af294489f3c2d5723738.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 354, + 555, + 495, + 656 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/91b32ed2b4041faded4ca926aa6a533ba22ec3c950274f75ac5ae3556ef420f0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 513, + 555, + 653, + 656 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/50fb798f21796fa55105c09f2e99d39e009e8f0617af0664a4836dbe70e314d0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 555, + 808, + 656 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/91219e5a27a8f4d0eede45256f0b86e5561a80dee1a5c603cab39235011f504d.jpg", + "image_caption": [ + "Input Image" + ], + "image_footnote": [], + "bbox": [ + 196, + 681, + 336, + 787 + ], + "page_idx": 26 + }, + { + 
"type": "image", + "img_path": "images/7e6bb403295173dc6c676365988dcd5fd4a615b8d4e09058c0895add12fbd47c.jpg", + "image_caption": [ + "Input Text: \"Make the color of the airplane be yellow instead.\"", + "GPT-40" + ], + "image_footnote": [], + "bbox": [ + 354, + 681, + 496, + 789 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/2c0a4cd420ef9d5e4634c2962449526f8adf2f9dc3c5b4ca6622d4b56c4d08ad.jpg", + "image_caption": [ + "Gemini 2.0 Flash", + "Figure 19: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MagicBrush [120]. Observations: Example 4 highlights GPT-4o's superior image understanding—accurately distinguishing between hair and a scarf (where MagicBrush fails) to execute the edit. In Example 5, its precise retention of the plane's logo and text further demonstrates robust object-preservation capabilities." + ], + "image_footnote": [], + "bbox": [ + 513, + 681, + 656, + 790 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/217541d90806a38fd995d3e22236061f85eb7bc63300a2acde2bd0f1910bb8aa.jpg", + "image_caption": [ + "MagicBrush" + ], + "image_footnote": [], + "bbox": [ + 666, + 681, + 805, + 790 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "2.2.3 Customization", + "text_level": 1, + "bbox": [ + 127, + 90, + 282, + 104 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Customization, also known as subject-driven generation or personalization, aims to enable visual generative models to generate visual concepts from given reference images. Initial methods [31, 91] have achieved this by optimizing text embeddings or model weights. Subsequent approaches [50, 36, 46, 125, 94, 129] expanded on these approaches to handle multiple visual concepts. Customization plays a crucial role in making visual generative models more flexible and applicable across diverse domains. By empowering models to adapt to user-provided inputs, it ensures outputs are tailored to specific visual concepts. This is particularly significant in industries such as artistic creation and advertising, where individualization and creativity are paramount.", + "bbox": [ + 124, + 114, + 870, + 212 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "To evaluate the performance of GPT-4o in this challenging task, we collect reference images from previous relevant works [130, 103], and conduct qualitative comparisons as shown in Figure 20 and Figure 21. For single-concept customization, we compare GPT-4o with Gemini 2.0 Flash and DisEnvisioner [130]. The results demonstrate that GPT-4o not only faithfully reproduces the visual concept from the reference image but also accurately adheres to the given textual description. In this task, GPT-4o significantly outperforms Gemini 2.0 Flash and achieves performance on par with the SOTA customization method. However, the images generated by GPT-4o still exhibit some \"copy-paste\" artifacts, leaving room for further improvement in the future. For multi-concept customization, we compare GPT-4o with Gemini 2.0 Flash and MS-Diffusion [103]. In this task, GPT-4o can still achieve competitive results for customizing multiple visual concepts in different contexts. 
Unfortunately, it struggles with certain unique combinations (e.g., making a dog wear a human dress), which could be attributed to the lack of relevant customization training data.", + "bbox": [ + 124, + 218, + 870, + 371 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Overall, GPT-4o demonstrates impressive performance in both single-concept and multi-concept customization tasks, showcasing strong concept fidelity and great text alignment. Despite some limitations, GPT-4o achieves remarkable results on par with SOTA customization methods and outperforms Gemini 2.0 Flash.", + "bbox": [ + 125, + 376, + 870, + 420 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 934, + 508, + 946 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Customization (Single concept)", + "text_level": 1, + "bbox": [ + 174, + 133, + 289, + 165 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/646e79ac9f1f81f041625ce775812c9530257492f90e5afea41349ce5cd6894a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 174, + 174, + 197, + 194 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Evaluation: Corresponding visual concepts of given reference images.", + "text_level": 1, + "bbox": [ + 199, + 179, + 795, + 198 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/7b9ec0dd32124c06ec282d499d80f72d58efd6c6b8339cfbdfe41eea456580d6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 150, + 203, + 318, + 332 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/29e4e012635440b4b8fd5440387c6bcf7b8f25f3e0e7c0ea56e381f52b1a1451.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 203, + 488, + 332 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/f596fda615ad13c09d3de7a59c67e2adf0d99aa3880879a7dfca06b8ddd317e8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 493, + 203, + 661, + 332 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/ed7f35bce8d09ae0a591d4b7e14edd5bbe5913c727af2763219ee8eccf546612.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 203, + 834, + 332 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Input Text: \"A dog on top of a purple rug in a forest, with reference to the attached image.\"", + "bbox": [ + 156, + 340, + 805, + 372 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/c337d9db266ab1888bf435abcc2ea4dfede05853a0b6a6b5906b13f424c95dc6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 150, + 378, + 318, + 510 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/be6c16bf461ad6ab94168d3e79ef58fabfe4403b526f0dc3a29bc696d3d3531e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 378, + 490, + 508 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/58f6525436f8706212ce32abd6aa2f4ba0d58a7cd53654d0f24fecf81b3fb9cb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 493, + 378, + 661, + 508 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/b05322a8b0f75be84fd68e7685029fe222a8456412836c464982ecfc2b379f16.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 378, + 834, + 510 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Input Text: \"A cat wearing a Santa hat, with reference to the attached image.\"", + "bbox": [ + 156, + 518, + 761, + 535 + ], + "page_idx": 28 + }, + { 
+ "type": "image", + "img_path": "images/2766a0e15a98adf29692d023a9e4c39fef5c3e5ec591c397ca4c3600d9eb3690.jpg", + "image_caption": [ + "Figure 20: Task: Single-concept customization. The goal is to generate images that faithfully reproduce a single visual concept from reference images while aligning with a given textual description. Setup: Reference images are collected from prior works [130], and results are compared across GPT-4o, Gemini 2.0 Flash [99], and DisEnvisioner [130]. Each row includes the input reference image, text prompt, and the corresponding outputs. Observations: GPT-4o demonstrates strong performance in faithfully reproducing the single visual concept with high fidelity while adhering closely to the given textual description. It consistently outperforms Gemini 2.0 Flash and achieves results comparable to the SOTA method DisEnvisioner. However, some generated images still exhibit minor \"copy-paste\" artifacts, indicating room for further improvement." + ], + "image_footnote": [], + "bbox": [ + 151, + 545, + 318, + 674 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/b6d6e46c7e1fd50306d7fb544b81d3bcbe88cbf2672995aac7590c083fadec72.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 545, + 488, + 675 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/decdf079eb5a7ceb8546733501c7687db436872547911348307fba42a262e86d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 493, + 545, + 661, + 675 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/27b3efd0800340d8acae187e104fb4c863298773ee0e13233df9fd4a8ff2e810.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 545, + 834, + 675 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Input Text: \"A pair of glasses with a tree and autumn leaves in the background, with reference to the attached image.\"", + "bbox": [ + 156, + 678, + 800, + 710 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 184, + 718, + 284, + 734 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 375, + 718, + 433, + 733 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 516, + 718, + 640, + 733 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "DisEnvisioner", + "bbox": [ + 699, + 718, + 803, + 732 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Customization (Multiple concepts)", + "text_level": 1, + "bbox": [ + 156, + 102, + 305, + 132 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/9f7208b84dbf5365d061b28896f055f01299d12b30af1dee82093163431a17e5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 138, + 204, + 157 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Evaluation: Corresponding visual concepts of given reference images.", + "text_level": 1, + "bbox": [ + 205, + 143, + 803, + 161 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/2982b017c59027bd916adf4c71fec2ef4c7b20ea6fcb1b3e05ccead2d64e7e70.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 151, + 167, + 287, + 273 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/a43b10404face144826ade37cce23e7383b09b5f6616cd1ba4fb27d65847035f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 289, + 167, + 421, + 
273 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/08504d0ac60ae0b5850f6667bc744a4e3d743395a161f0de6a775051bb6f7278.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 421, + 167, + 555, + 273 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/6217bbdb7e9c6d3d88bbced40c11ad8ad80d6cc7c0af8b951f80ab29474cc67e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 555, + 167, + 691, + 273 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/b359a631d7b7dee251d05aec14e46acaab4300aca8d16ae17240156e943e2d83.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 691, + 167, + 828, + 273 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Input Text: \"A dog wearing a dress in the snow, with reference to the attached images.\"", + "text_level": 1, + "bbox": [ + 156, + 282, + 828, + 301 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/7e7b185220c6a783809189a0317a2ebff5e610e53e31f229bc61cc66cb9833ff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 151, + 309, + 285, + 412 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/cb7b5802882687334bb83b37842a711b7d6066ee9b162b7773203fa931b58dae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 285, + 309, + 419, + 412 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/24acb3bb969c56cf9068bc75a47163bb4b2fd5cfb83a6ae51eec791707338fa8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 419, + 309, + 553, + 412 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/f93e5e0c608562728e8855f8158f8f8dfca8a94d4114bd194f042654bffe10c0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 553, + 309, + 687, + 412 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/9e7f0f5aebdeb1b02b10a56ee4521570fb0755150225325cdd221a08f72aeae6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 309, + 821, + 412 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Input Text: \"A flower with a barn in the background, with reference to the attached images.\"", + "text_level": 1, + "bbox": [ + 155, + 420, + 803, + 452 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/aab60cd3ceb814eae695e39c17c9ede66cb9a44a4700601d9febe20289cdba35.jpg", + "image_caption": [ + "Figure 21: Task: Multi-concept customization. The goal is to generate images that effectively combine multiple visual concepts from reference images while aligning with a given textual description. Setup: Reference images are collected from prior works [103], and results are compared across GPT-4o, Gemini 2.0 Flash [99], and MS-Diffusion [103]. Each row includes the input reference images, text prompt, and the corresponding outputs. Observations: GPT-4o achieves competitive results in combining multiple visual concepts, showing strong fidelity to individual concepts and alignment with text prompts. However, its performance declines with unique or complex combinations. Despite this, GPT-4o outperforms Gemini 2.0 Flash and achieves results on par with SOTA methods." 
+ ], + "image_footnote": [], + "bbox": [ + 151, + 459, + 285, + 561 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/b7a34b10991c1b9ad9cd6f27bad980e361343453a99acd05c8354db9560287b5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 285, + 459, + 419, + 561 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/ab685c4d565c19e94d0af9de2b15ce72d5e4d59e9fb5279055cc29a626f1a509.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 419, + 459, + 553, + 561 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/e4a1fb76e31c6e6095d35539c83419338ff04dfd9cc1cb7d1e9c18ca4ecabfab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 553, + 459, + 687, + 561 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/de9e123e1e1147256977150c0054e8e9a10a47b47074aa699979adc065b6358c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 459, + 821, + 561 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Input Text: \"A backpack and a stuffed animal in the jungle, with reference to the attached images.\"", + "text_level": 1, + "bbox": [ + 156, + 566, + 779, + 599 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 173, + 604, + 258, + 619 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 308, + 604, + 393, + 619 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 464, + 604, + 513, + 618 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 566, + 604, + 674, + 618 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "MS-Diffusion", + "bbox": [ + 705, + 604, + 795, + 618 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/d617caa5b4e6e92b1428018122e2e89713f4978830db0e21a7ee846b88ce3163.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 151, + 633, + 259, + 722 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/896b8444337aa0e1e16c6afd486c4b4f5296517582f2b9dbf6bc16114d53d2ab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 261, + 633, + 374, + 722 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/8217fbf0fc579e87f7d27802daffe76c3b9b1cdc80d7434cf08598ef201ca65b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 375, + 633, + 488, + 722 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/e3951f420a6bebec8d982ae86c6d63b89a8e9d80db44d4a5af3d22ff2a0dabf6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 488, + 633, + 598, + 722 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/0a9ad99bf92f46485bb850e72e6f3103fbdbc9cd19afa549dae69229296ce436.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 633, + 712, + 722 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/1e13756c7b144754ea9efd5eae0b5683845af5444f13a90c830d2492dfbd9bbd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 712, + 633, + 825, + 722 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Input Text: \"A lantern, a clock, and a backpack on a cobblestone street, with reference to the attached images.\"", + "text_level": 1, + "bbox": [ + 156, + 726, + 823, + 758 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 158, + 768, + 243, + 784 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": 
"Input Image", + "bbox": [ + 272, + 768, + 356, + 784 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 385, + 770, + 468, + 784 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 514, + 770, + 563, + 782 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 596, + 770, + 705, + 782 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "MS-Diffusion", + "bbox": [ + 720, + 770, + 812, + 782 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "2.2.4 Story Image Generation", + "text_level": 1, + "bbox": [ + 127, + 90, + 351, + 107 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Story image generation is a task to generate coherent stories based on input text narratives. The conditions may also include the first story frame or character images. We choose Gemini 2.0 Flash [99], StoryDiffusion [38], SEED-Story [111], and DiffSensei [108] as baselines, due to their proven ability to generate coherent and expressive story images and their public availability. The results are shown in Figure 22 and Figure 23.", + "bbox": [ + 125, + 114, + 870, + 172 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In the first example, GPT-4o and StoryDiffusion successfully generate a three-panel short story about a fisherman, whereas Gemini 2.0 Flash fails by producing a single panel that appears to combine the three story narratives. In the second example, the story narrative is longer, spanning 11 panels. To evaluate this scenario with GPT-4o, we instruct the model to generate story images sequentially—using the input image and all previously generated images along with the corresponding text prompts. As shown in the figure, GPT-4o is capable of generating a long story with consistency. In the final example, we examine a Japanese black-and-white manga style with multiple input character images. GPT-4o is able to generate coherent stories, though it exhibits minor errors in character consistency (notably with the depiction of the woman) and misalignment with the input narrative (the narrative requires 7 panels, but only 6 are generated). The baseline Gemini 2.0 Flash performs worse, failing to preserve character status and the correct number of panels, as it also produces only 6 panels. 
Conversely, the DiffSensei model demonstrates superior performance, likely due to its specialized design and training for Japanese black-and-white manga generation.", + "bbox": [ + 125, + 176, + 870, + 343 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In conclusion, while GPT-4o achieves comparable performance to current baselines in story image generation, it shows limitations in specific scenarios—such as Japanese black-and-white manga and precise character status preservation—when compared to methods specifically tailored for those tasks.", + "bbox": [ + 125, + 349, + 870, + 392 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Story Image Generation", + "text_level": 1, + "bbox": [ + 169, + 143, + 361, + 159 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Evaluation: Subject Consistency.", + "text_level": 1, + "bbox": [ + 357, + 169, + 640, + 189 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/9f84d4d4f227df36ae2d8eb29101d274a1877c545a2468371a5088741bc3cd8f.jpg", + "image_caption": [ + "GPT 40" + ], + "image_footnote": [], + "bbox": [ + 156, + 196, + 344, + 282 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/821b29da41b0b8024feaf0f1d198abc77e613c7dd6beb41244e3ee04340d25a7.jpg", + "image_caption": [ + "StoryDiffusion" + ], + "image_footnote": [], + "bbox": [ + 352, + 196, + 464, + 282 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/40ffe89df9c50ac3b8034fed7000e9df015d18b39c605d05fc2cb755261800bb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 467, + 196, + 578, + 282 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/7c0c2c96243ce0e8cc18e9a4df91dbe8e7777df73105a2ceb18d177a514d9735.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 196, + 692, + 282 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/73cf810872486276d591d5946a9ef3dc5549af56b3140dbc1d46cd76cf11381f.jpg", + "image_caption": [ + "Gemini 2.0 Flash" + ], + "image_footnote": [], + "bbox": [ + 699, + 196, + 838, + 282 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Input Text:", + "text_level": 1, + "bbox": [ + 168, + 311, + 259, + 325 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "\"Draw a story about:", + "bbox": [ + 168, + 325, + 318, + 339 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "An old fisherman in a cable-knit sweater and boots", + "bbox": [ + 168, + 339, + 532, + 353 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Laying out a picnic solo", + "2. Rowing a boat at dawn", + "3. Stargazing with a telescope\"." 
+ ], + "bbox": [ + 168, + 354, + 395, + 396 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/9aa5032b866731f07d0e37da2dd1017f2e4122d3413aa5ce54be7e8880b33931.jpg", + "image_caption": [ + "Input Image" + ], + "image_footnote": [], + "bbox": [ + 158, + 407, + 348, + 555 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/9b0a74f7db3b0699ac94506c383fb22b1af1f2bd911d7fa4312ee9ca289612ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 407, + 442, + 465 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/ea5e48f5561ad6c4fe1193a647524607970cfe7cdc63cd079944cf82880cecfe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 465, + 452, + 508 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/d68465b01fad9263615792827618d6bc3ea2ac4994279661c281ea4561a93647.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 510, + 426, + 553 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/48601f43a53bfc38e48cf7cca862cb1f50808dd99f5c4af851716147ddce8e55.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 444, + 407, + 524, + 465 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/c03c17cff30fad0446b3d264e031d30a0f68b4ac841df4e2880f2381e0399ca9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 444, + 465, + 547, + 508 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/c8ad21ac9388fd5c3e7f05f8a3c2192c73560b7737abef6737af98782aedeab8.jpg", + "image_caption": [ + "GPT 40" + ], + "image_footnote": [], + "bbox": [ + 444, + 510, + 491, + 553 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/85f94fe9cc62a3b120dd03edaa6ef6f69b02976c1603bf7df955e05359a6cb30.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 526, + 407, + 607, + 465 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/1c6fcddb57e3da5cf397a2753308ce3877a635582a5355e1b66875d5fb6b8636.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 526, + 465, + 607, + 508 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/0419b8779eb79040a7d1330971acc7342fec2dd90c7626958522a6fb5f0fcf8e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 526, + 510, + 607, + 553 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/505a6db0b4602aea76566f436b70be7da6453677d5c873c5267b35cc43b24a51.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 624, + 411, + 671, + 450 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/ccf71f9041dc2d2f4458a2e0113d7a09f60467f15834ca798f39accb6bbf34e8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 624, + 450, + 671, + 481 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/bcd70602d56a2f46a9663b8b18aa7be1f8cc70645914af87662cc424f8bd98e1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 624, + 481, + 671, + 507 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/a1117bdf2682b8d7115f12b7af60c126adce2d44e56036f3f9677ed0f6fa8cda.jpg", + "image_caption": [ + "SEED-Story" + ], + "image_footnote": [], + "bbox": [ + 624, + 507, + 671, + 554 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/5daa456ff2e454b0f91f88203eed2a998e30a44e393325bd1cc55a49937d3939.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 411, + 725, + 458 + ], + "page_idx": 31 + 
}, + { + "type": "image", + "img_path": "images/073d5da23f5b7f75f55840c7e941a2744edbcb0a242ace828bcaf195e2c9978e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 459, + 727, + 507 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/aa32d16435cd0e8550916c3f311f8776216cf8e0260b722c1418d0d7619083ed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 507, + 727, + 554 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/a492c7a006b896e7436ce9df3705f82414a33538f91a0c7ba1f64667941a3dc1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 732, + 411, + 782, + 458 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/0abc90b5e3bcbd0cb683654abfcc56966ce72e50c6e2cbbe3c61540b34a5b998.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 732, + 459, + 782, + 507 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/36ad8877a81602152fef02eb279de3452a2d3bcd29e3cca2508e6eb27da3dd21.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 732, + 507, + 782, + 554 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/e9288a0c02092fd53245823396ca1f76b2df5abca1ee697a871b5004f18dce58.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 411, + 836, + 458 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/aba75b8ad4c326892ca96755807604f259178d54af19a83236adab4115951198.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 459, + 836, + 507 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/b13f052399947f2c4da3a2882a7878f31fc79514f3fe8a809951a32ca1140118.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 507, + 836, + 554 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Input Text:", + "text_level": 1, + "bbox": [ + 168, + 583, + 259, + 597 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "\"Draw a story about George, a monkey:", + "1. He looked around with a curious expression, wondering what adventures awaited him.", + "2. Suddenly, George heard a noise. ...", + "3. To his surprise, the noise was George's friend, a small brown dog ...", + "4. George and the dog then played a game of hide and seek. George hid behind a couch ...", + "5. The next day, George and the dog decided to explore the city ...", + "6. George stopped on the city sidewalk, looking up at the sky ...", + "7. George then noticed a building with a reflective glass ...", + "8. George and the dog stood in front of the building, looking up at the lit windows ...", + "9. They were in a room with a door, waiting for their friend to join them", + "10. Suddenly, the door opened, and a man in a yellow suit walked in ...", + "11. He seemed deep in thought, unaware of George and the dog watching him from below ...\"." + ], + "bbox": [ + 168, + 598, + 821, + 768 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Figure 22: Task: Story image generation. The goal is to generate coherent story sequences based on narrative text, optionally conditioned on initial story frames or character images. Setup: Each example combines an input narrative (and, when available, reference character images) with a series of generated story panels. We compare outputs from GPT-4o against Gemini 2.0 Flash [99], StoryDiffusion [38], and SEED-Story [111]. 
Observations: GPT-4o exhibits strong narrative coherence and panel continuity, matching or surpassing general baselines.", + "bbox": [ + 125, + 792, + 870, + 863 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Story Image Generation", + "text_level": 1, + "bbox": [ + 168, + 220, + 359, + 238 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Evaluation: Subject Consistency.", + "text_level": 1, + "bbox": [ + 380, + 251, + 638, + 268 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/45c315f9d4b26f49ce32764e260ffb24ab385bf338d6917e2d37d6b26b242016.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 273, + 236, + 340 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/f4ad4ea5d8987493e7a7c6a58f347e56802ce312612d8ea2a144cda053cdf994.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 161, + 340, + 236, + 406 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/084f24a984613e1c05f7c5e2fa9817167a1fbac88e5cd473c3386ae1bd4b9322.jpg", + "image_caption": [ + "Input Images" + ], + "image_footnote": [], + "bbox": [ + 161, + 407, + 236, + 474 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/536ebeb2091f4d9765dfff67b32c88a8b7e2f773ca113d0bf1fe677432f050ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 253, + 276, + 419, + 337 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/89edb9b677a275a49f6618e18a5f5744c28971461e4544202201e438f92139f3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 253, + 338, + 418, + 402 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/186181b2b63acba5d62a3720407b17286c90fad94c34b8ea5cef23a65cfbf1ec.jpg", + "image_caption": [ + "GPT 40" + ], + "image_footnote": [], + "bbox": [ + 253, + 405, + 418, + 472 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/28bf76b0adb60b3e53902f81e3d33090021fb2e1147bd03fb08fac858f7ac972.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 426, + 273, + 607, + 339 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/9bba0dc289d8ec29ff37642b0bc4d84feafbf8e7b361e6e17186d7b033c6b5be.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 426, + 340, + 606, + 410 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/8ef5824e83d6803a07b614317f4d908c06c18331051e08b8eef6e454c1be8073.jpg", + "image_caption": [ + "Gemini 2.0 Flash" + ], + "image_footnote": [], + "bbox": [ + 428, + 410, + 606, + 473 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/6043908cb4a51385380e40a2264f62f78e48ba2368469bbca5ad6f2dde9482cf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 612, + 275, + 831, + 330 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/94c4c44bd6d4d882958c8eed91e70b3ca5d10dea1f870d92a7661befb223ea99.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 612, + 332, + 830, + 391 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/072676f5a96fdb59a2c5611f9262a740e1f39908971d09d9835789f46d395625.jpg", + "image_caption": [ + "DiffSensei", + "Figure 23: Task: Story image generation. The goal is to generate coherent story sequences based on narrative text, optionally conditioned on initial story frames or character images. 
Setup: Each example combines an input narrative (and, when available, reference character images) with a series of generated story panels. We compare outputs from GPT-4o against baselines including Gemini 2.0 Flash [99] and DiffSensei [108]. Observations: GPT-4o shows minor shortcomings in precise character consistency and panel count in specialized contexts, such as Japanese black-and-white manga, where dedicated models like DiffSensei deliver superior performance." + ], + "image_footnote": [], + "bbox": [ + 612, + 392, + 830, + 472 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Input Text:", + "text_level": 1, + "bbox": [ + 158, + 512, + 248, + 525 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "\"Please generate a black-and-white manga using the given characters (a young man, a child, and a woman). Each panel may appear 0-3 characters.", + "bbox": [ + 158, + 527, + 831, + 554 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. A man is lying on the floor surrounded by books and papers, with a radio nearby.", + "2. A woman with curly hair is smiling. She's wearing a patterned shirt and apron. She's holding a baby.", + "3. A man with a surprised expression, his mouth open as if he's about to shout or scream.", + "4. A young man with a surprised expression, is holding a baby on his back.", + "5. A man is holding a baby. The man's hair is disheveled.", + "6. A man with a surprised expression. His eyes wide and eyebrows raised.", + "7. A man carrying a child on his back walk up a staircase. The man is wearing a stripped shirt\"." + ], + "bbox": [ + 158, + 555, + 834, + 667 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "2.2.5 Low-level Vision", + "text_level": 1, + "bbox": [ + 127, + 90, + 295, + 104 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Low-level vision tasks aim to enhance the basic quality or detail of visual content by improving various aspects of an image. Initial methods often focused on optimizing single tasks, such as super-resolution [88, 95], denoising [61, 63, 55], restoration [60, 20, 62, 84, 15, 16, 17], color adjustment [59], and more [22, 66, 116, 1, 122]. As the technology progressed, subsequent approaches expanded these techniques to handle multiple low-level tasks simultaneously, which is called universal image restoration. Low-level tasks play a critical role in image generation and editing, allowing visual generative models to provide higher-quality outputs in real-world applications. By enabling models to adapt to diverse inputs, they ensure that the generated images perform well across different visual tasks. This is especially important in areas such as image restoration and video enhancement, where high-precision visual content optimization is crucial, such as in film post-production and autonomous driving.", + "bbox": [ + 124, + 114, + 869, + 239 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "We evaluate the performance of GPT-4o on these challenging tasks. First, for image restoration tasks such as super-resolution, denoising, deraining, low-light enhancement, deblurring, and dehazing, we collect reference images from previous relevant works and compare GPT-4o with Gemini 2.0 Flash and a universal image restoration model, InstructIR [20], as shown in Figures 24, 25, 26, 27, 28, 29, 33, 34. 
In most scenarios, GPT-4o produces high-quality output images, outperforming Gemini 2.0 Flash. However, there are still some degradation issues that are difficult to remove, as seen in the second image of the image denoising task. On the other hand, for low-level image restoration tasks, maintaining pixel consistency between the output and input images is crucial. GPT-4o does not perform well in this regard, as the content of many images changes. In contrast, InstructIR, designed specifically for image restoration, performs better, effectively removing degradation while maintaining pixel consistency throughout.", + "bbox": [ + 124, + 244, + 867, + 371 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "For image inpainting and outpainting, shown in Figures 30 and 31, we compare GPT-4o and Gemini 2.0 Flash with the latest inpainting and outpainting methods [66, 116, 22, 1]. Only the missing regions need to be completed, but GPT-4o still alters content that should remain unchanged. Although the output image quality is higher, this is not ideal for evaluating the task itself. For human face inpainting, GPT-4o's overall artistic style is more natural compared to the other two methods. For colorization, we choose the latest colorization model, CtrlColor [59]. In Figure 32, the overall style is somewhat dark. Compared to Gemini 2.0 Flash, GPT-4o's colors are more natural and consistent with the style. However, there are some inaccuracies in color control. For example, in the second image, the cat's color is not white as specified in the text. Additionally, GPT-4o still exhibits issues with changes in image content, such as the shape of the human's face in the fourth image.", + "bbox": [ + 124, + 376, + 867, + 501 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "For the image re-lighting task in Figure 35, GPT-4o performs well in applying realistic lighting and shadows, with natural color tones that match the scene. However, it occasionally struggles with maintaining light consistency, particularly in complex lighting scenarios, such as neon or vibrant lights. Compared to Gemini 2.0 Flash, GPT-4o produces more natural and consistent results, but it doesn't always accurately replicate the lighting effects, as seen in the second image, where the neon lighting could have been better captured. IC-Light [122] is effective in applying realistic lighting, but tends to lose detail in some complex objects or faces under different light conditions. Overall, GPT-4o is a strong contender for the image re-lighting task, providing good light consistency but leaving room for improvement in some specific scenarios.", + "bbox": [ + 124, + 507, + 867, + 618 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "In summary, GPT-4o demonstrates strong performance in various low-level vision tasks, often surpassing Gemini 2.0 Flash in output quality with more natural and visually appealing results. However, it struggles with maintaining pixel consistency and avoiding undesired changes to image content, which are critical for tasks like restoration and inpainting. 
While its adaptability and realism are impressive, there is room for improvement in precision and task-specific consistency compared to specialized models like InstructIR and IC-Light.", + "bbox": [ + 124, + 625, + 867, + 695 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Denoising", + "text_level": 1, + "bbox": [ + 217, + 98, + 289, + 113 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/1a4731938b542638df6ad94c956073db2b548c15e4b1b8114d5307a10c99ea73.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 385, + 118, + 403, + 136 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Evaluation: Image Quality.", + "text_level": 1, + "bbox": [ + 405, + 122, + 601, + 137 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/0a89af2a4699039f927a01b35f74a721a10ddc2eb096385ea8edf582d6c03d50.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 143, + 330, + 257 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/1beb76da0b2a004a33d76279d80831d9cbb3917b5daee0d2aa13d05feb493089.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 143, + 491, + 257 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/174139ab6dcc12cd6a99f039cfe71445d47612cb61b456c34e139a4d44ce5b27.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 143, + 656, + 257 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/25c6f8568e7bcdc44e6309534f95b1a0b59bb18d7773a69a2d061ee65e297860.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 143, + 820, + 257 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/25046cf08634831335f437686635f7b1affeb5cf872dfd9bdeef73ad97a4f786.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 262, + 331, + 417 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/0a2a55819ee33dc9ba77d52891f0222654f300378e0c315f91df125d5feab4ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 262, + 491, + 417 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/34f3e69973949e582903e9064b4ccc8b319f75b359178b7208421be704e55d45.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 262, + 656, + 417 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/46dec341162765728e1fc8f0cd002ebf4ee74f97cd953e898c4270f10f2a5328.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 262, + 820, + 417 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/47297a815dfe05f13941cb00e869c4a19a92d4533649e2a583bafd94faf893c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 424, + 331, + 525 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/9f7537d877ef9e59c4b7d2865b5b20522610acea27d5295f2b7a540a7f158aa1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 424, + 491, + 527 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/4425fde2f8933a0d0a27e3c8934be18f51d1e042682ca256127948d03c7d8026.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 424, + 656, + 527 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/29f5e73b544db2cde87a7ae619bda23d4e11cc25cfb4269634f02ce791227353.jpg", + "image_caption": [], + "image_footnote": [], + 
"bbox": [ + 666, + 424, + 820, + 527 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/6b407e152dd4b4cace6037d3643ed856c1e206632e4bbccf517c4fef9f21d5ff.jpg", + "image_caption": [ + "Figure 24: Task: image denoising, aiming to remove the noise information and obtain high-quality clear version. Setup: We compare GPT-4o with InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the denoised images. Observations: GPT-4o can restore high-quality denoised images. Except for the second image, where the noise cannot be completely removed, the other images are free from noise. However, for low-level tasks, GPT-4o does not maintain content consistency well — the background colors and object shapes in many images have changed, such as the background color in the first image and the floor in the fourth image." + ], + "image_footnote": [], + "bbox": [ + 178, + 532, + 331, + 625 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/180b6d2fbe08d1106795eea1f9a5165620b7ff33122c84b0e0c53f5d47bd52a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 532, + 491, + 625 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/4e2bd472765d312f5f2fa59f23f64b308cadfa60ddeaef7c74ca0effcf81c1de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 532, + 656, + 625 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/345956a47d1ba18bc47ef420be35f2680ee92364e251cc4f0c85890f41d35a52.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 532, + 820, + 625 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/ea1923c868229c788db11eb4c4c0a0c7fdfff3244b7867c71f36ecc71509d140.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 632, + 331, + 761 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/0d95f3d9522c913d7fb63c52805b36252551d9de67bab8edf492afd706660e7d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 632, + 491, + 761 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/2865da09f43ccfb7bad4c1a38f720f8a7f22179ea35f92a1539146184df21cd6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 632, + 656, + 761 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/273a3c5b1d1bd94335184aafbed95bd03cde1025a92f44aa700ed255ea3c88e5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 632, + 820, + 761 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Input Text: \"Remove the noise, make the image clear.\"", + "bbox": [ + 184, + 763, + 573, + 779 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 207, + 785, + 299, + 800 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 385, + 784, + 437, + 796 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 521, + 785, + 638, + 797 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "InstructIR", + "bbox": [ + 700, + 784, + 781, + 796 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Deraining", + "text_level": 1, + "bbox": [ + 196, + 190, + 271, + 207 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Evaluation: Image Quality.", + "text_level": 1, + "bbox": [ + 387, + 214, + 622, + 232 + ], + "page_idx": 35 + }, + { + "type": "image", + 
"img_path": "images/c33450ac171eaded049f91066f0deeba4af786415bc6dd5d653dc55c54b3ee41.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 239, + 325, + 328 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/e15b31d827df49078bd8ff4b5a3c0b7e9766b7e0ce33322d4b3e8fa24173ca40.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 239, + 496, + 328 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/4bd0fc1b08f65d2b90caeb19644fda48d0c14d56f7a3b6d5b0258145a86adade.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 239, + 671, + 328 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/dccfcb457f6ecc58a1354c97e2786be9af3ddba306e9c14119bc34b83281433c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 239, + 844, + 328 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/e7da05826e2092da3f8c786eaba5c6a6330ab3558f6720c639f754e67843897b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 335, + 323, + 422 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/bb6ced5f0c7f3239e75cdaf62739d7bd88f4e2dea5220e63e06fbbda019b0215.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 335, + 496, + 421 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/a0be173027e50abb4eeaa3a09379cf5495e4baeb4d07b8cfe3c3fbfc7f49da66.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 335, + 671, + 422 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/902fc2f31952ca02a07900b777fb797fe5708419dadaf611ebdbe7c0305895cd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 335, + 844, + 422 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/53b1fed05e6b1e6eefadf734ccb916b0779e1b0b4d17f572e0bed7be77130d6c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 430, + 325, + 515 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/45630d882ee83bb0a62e1013d838d630afca2a25e99127c4ab251b2d79d6652d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 430, + 496, + 516 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/7e92a2b49a69f9d2edbf84340e1fa8c4dbc2cf0e60655a02c6d49818f70c5d3d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 430, + 671, + 517 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/a8bf6e323c48a02f642dd0e8b8ab2bf45779e5f873173853590a980210b6b42b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 430, + 844, + 517 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/536e0b422f449656c91454149f3446fde812ec0f995fcf5bbe9e0a43e2d597fa.jpg", + "image_caption": [ + "Input Text: \"Remove the rain, make the image clear.\"" + ], + "image_footnote": [], + "bbox": [ + 158, + 522, + 325, + 630 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/d900d2a2f4a7e03bc322a876b3c802692357c1fec98fb4a0bdfe6de558f8f44a.jpg", + "image_caption": [ + "Figure 25: Task: image deraining, aiming to remove the rain streak and get high-quality clear version. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the derained images. Observations: The overall performance of the GPT-4o is well. 
However, the model struggles with maintaining content consistency in low-level visual details — for instance, the polar bear's background in the first image becomes unnaturally pink, and the underwater scene loses depth and clarity. The flowers also appear altered in color and arrangement. In contrast, InstructIR demonstrates the most consistent performance across all examples, effectively removing rain while preserving the original scene's structure, color, and composition. Overall, InstructIR is the most balanced and accurate model for image restoration in this comparison." + ], + "image_footnote": [], + "bbox": [ + 331, + 522, + 496, + 630 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/4f7cd151bdab47a69f14cb0fefb2b225b07676d7de97906f3f09e608ae362d1f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 522, + 671, + 630 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/a1cf0fd8a07f055d804221a00dac3ab6196ba65037b7980ae7d6ac1150ef8152.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 522, + 844, + 631 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 191, + 661, + 289, + 678 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "GPT40", + "bbox": [ + 385, + 662, + 442, + 676 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 521, + 662, + 648, + 676 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "InstructIR", + "bbox": [ + 715, + 662, + 803, + 676 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Dehazing", + "text_level": 1, + "bbox": [ + 197, + 215, + 269, + 231 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Evaluation: Image Quality.", + "text_level": 1, + "bbox": [ + 380, + 233, + 614, + 253 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/0b161ea7ba45146ed5301977dbc4153cddf37d20e4042c25caf795a6761d7b64.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 258, + 320, + 351 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/d9fef0de375f6e8e87b378aa29d3ee9d9b91df21b84ed0eb8eaf780d387eb2f7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 333, + 258, + 488, + 351 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/2f53f480f09632c6f3fce2f380303d7ddfff3462b4f02b3fbd217d4b98cd7bfb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 258, + 658, + 351 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/1dcf8e8781f2caee3cccfcec05a958f963d7001ad1378b322b20be96f757857e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 258, + 831, + 351 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/192fef7db091bb823a304562be65f9026029dbec8cd7557e21fc725cf7a16820.jpg", + "image_caption": [ + "Figure 26: Task: image dehazing, aiming to remove the haze information and get high-quality clear version. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the dehazed images. Observations: GPT-4o performs moderately well in dehazing, managing to restore clearer structures and contrast in most scenes. However, its outputs often have a grayish or desaturated tone, especially visible in the second and third rows. 
Gemini 2.0 Flash produces more colorful results but tends to leave some haze behind, leading to a less crisp output. InstructIR outperforms both, offering the most visually natural and sharp dehazing across all examples while preserving original colors and details. Overall, InstructIR demonstrates the strongest capability in removing haze while maintaining realism." + ], + "image_footnote": [], + "bbox": [ + 163, + 363, + 320, + 465 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/fa73cd36deac40a3a5de512bdd640d92cbdd918be0feb815b9659870541d646f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 363, + 488, + 465 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/1f7f8dc7c95580fa7127ddf404e77ee13666d31a7cb6fd3eb7e66943921a35a7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 363, + 658, + 465 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/2b91fde29b7d65bb3965853dad07b3b346f976065009ddf3ee693efe1429b9de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 363, + 831, + 465 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/6487fa3903724c091ac75d960053975fd920521719237fbe538abf890e5b7d14.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 165, + 478, + 320, + 601 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/b57c9edcd60d270e665488237cb0979f4c4725a3a0c7e8682695c4bab96be517.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 478, + 488, + 602 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/68285fa202a5fdaa42f6a5b5c9a5ad11300451918903881288401c4c6d69b8ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 478, + 658, + 602 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/7dd57547ecd2c2e9b99488cd75ff2d9c6688588f611f0c7b10caea435b7cf9dd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 478, + 831, + 601 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Input Text: \"I took this photo during a foggy day. 
Can you improve it?\"", + "bbox": [ + 168, + 604, + 712, + 622 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 194, + 628, + 292, + 645 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 379, + 630, + 436, + 643 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 514, + 630, + 640, + 643 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "InstructIR", + "bbox": [ + 709, + 630, + 795, + 643 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Low-light Enhancement", + "text_level": 1, + "bbox": [ + 148, + 263, + 331, + 282 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Evaluation: Consistency.", + "text_level": 1, + "bbox": [ + 370, + 292, + 586, + 313 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/a84122365c616f0e518763e95e8748954778d1d6f37580aebe6d9fcdb6dbdefd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 319, + 320, + 402 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/a5bc8e52d7842f4252e539ba34adcea369c6d11bb3eaa72ea23966ff1c26f6e2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 319, + 488, + 402 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/c090907878b87d590ef3ced70007db5dbbe66202ce9b1e00122e2b8df532d6b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 318, + 658, + 400 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/995ae0408307d51c1fe4b22c7386e33f12f5f2de57aab5e0c882854a62a49b4a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 318, + 828, + 400 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/d9be1ffaecc96d29076914277e7c30d3db8c4735a052211f5df25b329509f3bf.jpg", + "image_caption": [ + "Figure 27: Task: low-light image enhancement, aiming to increase the brightness of the image to obtain a high-brightness image. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the brightened images. Observations: In low-light enhancement tasks, GPT-4o can brighten images and recover basic visibility, but often introduces unnatural lighting and loses detail, especially in the second row, where the image remains overly dark. InstructIR consistently delivers the most balanced results, enhancing visibility while preserving true colors and textures, making it the best performer across all three examples."
+ ], + "image_footnote": [], + "bbox": [ + 163, + 410, + 320, + 491 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/7c783389b6755b3395ca901bd7e2c2f77ed95615d85ca58ae5a381dd0a4cbd5e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 410, + 488, + 491 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/1a5b4b1774fdc23750f023c71c85c56ea578acf01ebde2ea68333a166999bf31.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 410, + 658, + 489 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/59b7eab0c07c081369aaeb8fc99ee1be38d0f888691576752368cada60c2dfd5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 410, + 828, + 491 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/bdfaed21c06e793694872cf312fb039cb068ab6d61c502ad781bc11ec2d4c06a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 500, + 320, + 582 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/7ea976f449a8043690de06844c2b94aafce18ca58e6beea0e7e26544e3ab2899.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 333, + 500, + 488, + 582 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/c00e98505d0ec6544c9ab3f147c4ec2eb644ae9be7a7e5bf1a8d123c564b6686.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 500, + 658, + 580 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/2043ba7207ad0d9c159f8b1abd4cb8a7575070b2f4ddccfd301508c5cbc7ce5a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 500, + 828, + 582 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Input Text: \"I took My image is too dark, I cannot see anything. 
Can you fix it?\"", + "bbox": [ + 173, + 585, + 790, + 604 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 192, + 616, + 292, + 633 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 382, + 616, + 439, + 630 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 517, + 616, + 645, + 631 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "InstructIR", + "bbox": [ + 709, + 616, + 797, + 630 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Deblurring", + "text_level": 1, + "bbox": [ + 196, + 232, + 274, + 250 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Evaluation: Image Quality.", + "text_level": 1, + "bbox": [ + 377, + 257, + 614, + 277 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/85f4dbb3e1869fc684c51a8d3da40afdad3cedebd548adaef1a8346cf9e23270.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 155, + 284, + 323, + 382 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/6b9df00e9d9d77d03f6e7d0327a679c41ab300965c8160ed8995d13466569736.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 328, + 284, + 493, + 381 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/37feb29d085233a01159a45ae5e03d6b57ff8f11c715ca55b4a77dfee3fc645b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 284, + 666, + 381 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/5d41134c1d56df0d9d4e13222e77a77b5332513dbf0e28ef10e50470295d17e3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 284, + 836, + 382 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/98924e97a312fd055c311dfce4f2f33c194a08959c53493dda6596e9bbacc31e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 156, + 383, + 321, + 473 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/5930d203da41b02946e5b564261b3a200c305a6a549a4ba7c3961c80697c16ce.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 328, + 383, + 493, + 473 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/c52ea48115f508872bd5b5ec87bc06b52622f4b6d3cea22c4f58d26b4d869481.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 383, + 666, + 473 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/3270cef07702d259baaf487526f2ae6da683fab7cc7fbfd29b04b26044a58e69.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 383, + 836, + 473 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/d25e5d34348f2c9916b8707da5ebf28418590c98d607597c3205410432cd39ab.jpg", + "image_caption": [ + "Input Text: \"I took this photo while I was running, can you stabilize the image? it is too blurry.\"" + ], + "image_footnote": [], + "bbox": [ + 156, + 477, + 323, + 565 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/10e10b4b43504d485d1b89173a358ad873c0bb08494a06dc780f0f725b125906.jpg", + "image_caption": [ + "Figure 28: Task: image deblurring, aiming to remove the blur information to obtain a clear image. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the deblurred images. 
Observations: For motion deblurring, GPT-4o recovers some sharpness, especially in fine details like text or faces, but the content does not match the original image. Gemini 2.0 Flash sharpens the image slightly better in some cases but can introduce over-smoothing, making the result look artificial. InstructIR demonstrates the best deblurring performance overall — restoring clear edges, facial features, and text while maintaining natural textures. It consistently produces the most stable and visually convincing results across all examples." + ], + "image_footnote": [], + "bbox": [ + 328, + 477, + 493, + 565 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/324c435155e439e0e4927509fb4bc07e46d67b68706e04a95c81c2c43616dade.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 477, + 666, + 565 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/0d5d4b908f4ff3dbdff9941212fc68ab04e87f2ef75d6228897f56880dd27fc6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 477, + 836, + 565 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 191, + 613, + 290, + 631 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 379, + 613, + 437, + 628 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 516, + 613, + 645, + 628 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "InstructIR", + "bbox": [ + 707, + 613, + 797, + 628 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Super-Resolution", + "text_level": 1, + "bbox": [ + 169, + 229, + 305, + 247 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Evaluation: Image Quality.", + "text_level": 1, + "bbox": [ + 377, + 244, + 614, + 267 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/4f5025e7e13970ffe6ae4d344132af9a438277305e7345db9006ce618d0573fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 166, + 272, + 328, + 396 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/16dc34668ed85bc66c78fa13a9e59e97bd1551b7dcf1f770849b1c6ec45313ab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 272, + 496, + 395 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/536066035c978487014a28aff6357f8949e5085087a1135fefd97bd330b10ad5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 272, + 666, + 395 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/9bfe1fd24e221db430a1ef2c97a6b127a44b5d7b0c9d1975f8c8209268003e53.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 272, + 834, + 395 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/787c6e3871eb657b7ad2619b07c940f4253e90dda0a171100d9c16bb8ce5179e.jpg", + "image_caption": [ + "Figure 29: Task: image super-resolution, aiming to improve the image resolution. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the super-resolved images. Observations: In super-resolution, InstructIR delivers the most natural and detailed results across all examples—restoring fine edges in the card reader, realistic texture on the octopus, and sharp trees in the landscape. GPT-4o enhances clarity but misses details like the octopus surface and tree leaves. 
Gemini 2.0 Flash produces sharper outputs than GPT-4o but introduces unnatural textures and artifacts, especially in organic regions like the octopus and foliage." + ], + "image_footnote": [], + "bbox": [ + 168, + 402, + 326, + 501 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/89139fed496336187890615f822656f053268a0928b6b8aad1304fa164575873.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 402, + 495, + 501 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/c817252ab152b2bdf4e05c1e86107c2ae73fac4e18e54c85852a65c86d6eb4ff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 402, + 663, + 501 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/deeb960aa331c541bdfd82bbc98e2d7b1232c00088999d254c3e72f9a70ad184.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 402, + 833, + 501 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/4b7ed5536998e9b32588208e95ba79958f7565c5f1a32016788821d35ecd191a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 168, + 508, + 326, + 598 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/f91d6cbc924b18c4e72f2efd1f2c0ffe7a85d740967c0f696474bad835204b3e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 508, + 496, + 597 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/23c403764728be45b17ee5db54c9ec0e5a6b239c1402350ba72598d480c17fbc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 508, + 663, + 597 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/7c6e1db18970fe110e86871683e72f4fceaeb6a8c3e0c16fe2dfb612916f8a04.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 508, + 833, + 597 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Input Text: \"Make my photo bigger and better. Add details to this image. 
Increase the resolution of this photo.\"", + "bbox": [ + 169, + 603, + 812, + 635 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 196, + 645, + 295, + 662 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 388, + 645, + 444, + 659 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 517, + 645, + 645, + 660 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "InstructIR", + "bbox": [ + 700, + 645, + 787, + 660 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Inpainting", + "text_level": 1, + "bbox": [ + 194, + 188, + 277, + 205 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Evaluation: Image Quality.", + "text_level": 1, + "bbox": [ + 377, + 205, + 614, + 224 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/c9d161d0a7f9636e0f15f7cf8b38ef3cde507b88822b80d1c054fc17965684c8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 234, + 321, + 345 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/5d0d22f1be19f77f4671c9695622b0c8c720ea17567c646c8545d5c47f0261dd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 234, + 483, + 345 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/855b2a4ebf86a04b3d8e45ed6cc6bfe5358ad56960cbb002301c9715b3a3062e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 234, + 651, + 345 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/2a2ba40c70067c2bbfe87ba3140fb913b131f18ca0f6a0801c01555583398b16.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 236, + 823, + 345 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Input Text: \"Please inpainting the image, make it looks reasonable.\"", + "bbox": [ + 184, + 349, + 712, + 366 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/327aff3d754879bc07e69c135ac470cd76c2e5d5328c263cd495368496462207.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 369, + 313, + 474 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/fbb3f36e987742956eb393471e0d4822531b76d7cb4cd749008a8301e0d7abaf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 372, + 475, + 476 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/8da79e08847cb47bf66249a4079de84e4d65ff7744d90920fa8a900bbdbaed53.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 372, + 653, + 477 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/d238501a0cfe7a41a5b2875bb5e131d9ad25dd839d448e2d25bc5ab1fcd60ac6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 372, + 821, + 477 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Input Text: \"Please inpainting the image, make it looks reasonable.\"", + "bbox": [ + 186, + 478, + 715, + 494 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/0f8a4c52b6db3fbfb6fe2b7d165d3273b2849687f1ef570d6810290c516025db.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 503, + 316, + 614 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/2f732805f2b0034e45b6e06d068adc0a84b73d56f50fb84d71c1d98da5382f36.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 338, + 503, + 475, +
614 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/92d34852cfd83335f95aa56035188712f19e68bf3f090085cf6440a634a2b66a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 505, + 651, + 616 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/82a5f5ec145a93551e9693a7e4e48cedd5a986546f72da6f448a7c2c57ab92ee.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 503, + 821, + 614 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Input Text: \"Inpaint the missing part of the face in the image, making the restored area look natural and seamless.\"", + "bbox": [ + 183, + 618, + 830, + 648 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 200, + 661, + 299, + 678 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 382, + 661, + 439, + 676 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 516, + 662, + 643, + 676 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "LatentPaint", + "bbox": [ + 712, + 661, + 803, + 675 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Figure 30: Task: Image inpainting, aiming to restore missing or masked regions in an image to appear natural and consistent with the context. Setup: We compare GPT-4o with baselines such as Gemini 2.0 Flash [99] and LatentPaint [22], evaluating their ability to fill in masked regions realistically. Observations: GPT-4o produces plausible completions but often lacks fine structure and texture alignment—e.g., the bricks in the first row appear flat and misaligned. Gemini 2.0 Flash generates more visually coherent textures, especially in natural scenes like the second row, but can introduce slight over-smoothing. 
LatentPaint performs the best, accurately reconstructing facial details and complex textures such as hair and expression in the third row, demonstrating superior semantic understanding and visual consistency.", + "bbox": [ + 125, + 708, + 869, + 819 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Outpainting", + "text_level": 1, + "bbox": [ + 233, + 97, + 313, + 112 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/842316d408aba300ecb8a4b27bfcbba0807e6bf4b66c38430aceb0e69a3016e2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 405, + 113, + 421, + 128 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Evaluation: Image Quality.", + "text_level": 1, + "bbox": [ + 424, + 116, + 604, + 131 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/ecc01e85a79c76a542d72a3c3811916efd3af7089be84ce35914d93646b60cbd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 217, + 135, + 346, + 239 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/63bf7481f04803305e25b194d0f6a798e29e4ad8347b13488f22af38472f7dbb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 136, + 500, + 241 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/16e0aacca78d0ddc7841c44b3666005097467f6eb11e4ce97e8f4b2ead39c10b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 136, + 648, + 241 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/e739ed6e7d2e595e03926be7ac6c7b1fb3bd3badc49f246dca21b01655f93b1c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 137, + 795, + 241 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Input Text: \"Inpainting this image: a classic dark brown leather Chesterfield loveseat with tufted detailing and rolled arms. It sits in a cozy, traditionally styled living room with green walls, framed artwork, and warm lighting, creating an elegant and vintage atmosphere.\"", + "bbox": [ + 225, + 243, + 790, + 296 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/62b37976bf23d0f522572b1d32ef2041be79cd68b5ed5fc0163a0fe71955417f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 212, + 297, + 349, + 402 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/96f8443e9e1d95f4d7b78542413a7daa8e652b264b9a0a1fb5f13e9ccdf11c2d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 297, + 500, + 402 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/dd6454a272ee52c994805637b87e75f5966a46cdbd76435fa0cf210c205b0168.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 297, + 648, + 402 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/2d141aa5b85e10194f8145486f1551abb222659e93a01d192127766df99e63e9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 297, + 795, + 402 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Input Text: \"Extend the image to the left and right with a realistic continuation of the street, sidewalk, and background buildings. 
Maintain consistent lighting, shadows, and overall style.\"", + "bbox": [ + 222, + 404, + 779, + 444 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/d6cb4c9f05457cd35c0effca1f6abd864d75f43df91491467710d0bc3721a83c.jpg", + "image_caption": [ + "Figure 31: Task: Image outpainting, aiming to extend the visual content of an image beyond its original boundaries coherently and realistically. Setup: We compare GPT-4o with Gemini 2.0 Flash [99], and several specialized outpainting methods (SGT+ [116], StrDiffusion [66] and Dream360 [1]), evaluating their ability to extend content while maintaining visual consistency in lighting, texture, and semantics. Observations: The specialized outpainting methods consistently produce the most coherent extensions — for example, they accurately maintain the room's lighting and decor in the first row, continue architectural lines and street perspective in the second, and create seamless snowy landscapes in the third. GPT-4o offers plausible structure but often lacks fine detail and texture continuity, such as mismatched snow gradients or missing shadows. Gemini 2.0 Flash performs slightly better in semantic extension than GPT-4o but can introduce lighting inconsistencies and abrupt transitions, particularly in wide scenes like the desert in the final row." + ], + "image_footnote": [], + "bbox": [ + 212, + 445, + 349, + 550 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/88aa98d1328c5f92799e7830df3cc592e78892873c7d450de34f064e06cadf11.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 445, + 500, + 549 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/dd2dae5038367f85e716992705228d85cfed4fff5609bcaadfd9baa3415e36b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 445, + 648, + 549 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/a25788560611502b56750b8036243b4afafc5fde1438409b033768440c793002.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 445, + 795, + 549 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Input Text: \"Extend the image to the left and right, filling the black areas with a natural continuation of the snowy mountain landscape, ski path, trees, and sky. Keep the lighting, shadows, and textures consistent with the original image.\"", + "bbox": [ + 223, + 551, + 771, + 592 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/999497601ef88fdd1c39e3f4a28faa7e03a78e030514867e7db3e4a3be244eff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 212, + 593, + 349, + 696 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/5fd37721ff6283442746e2e6bca6d7639893b65c0c252601a4e8feeaec442651.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 593, + 500, + 696 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/899da298140f306785ebb56f86b92a0e2dfa83b94136c6477ed20cbea1efa3fe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 593, + 648, + 696 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/80963c1dbf62d49450d0bd1cbdf60c75eaa325a5af95ee28f199c5cd0699b838.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 593, + 795, + 696 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Input Text: \"Outpaint the center of this panoramic image to naturally connect the left and right desert landscape. 
Fill the middle area with a realistic continuation of the rocky desert terrain and blue sky with clouds, ensuring seamless blending and consistent perspective.\"", + "bbox": [ + 217, + 699, + 761, + 753 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 241, + 757, + 328, + 771 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 406, + 758, + 455, + 770 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 522, + 758, + 632, + 770 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Dream 360", + "bbox": [ + 692, + 758, + 767, + 770 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Colorization", + "text_level": 1, + "bbox": [ + 189, + 156, + 282, + 170 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Evaluation: Image Quality.", + "text_level": 1, + "bbox": [ + 370, + 174, + 607, + 193 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/a545885da447cbcefa593efa1163db12a6ecaa2cdbbf6e96a524f3a907d3724c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 156, + 199, + 316, + 282 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/b33e1d9b8d191b6badbe6994b410d984583d93bb0eec0eb614f21170a2cdb7d5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 326, + 199, + 486, + 282 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/b0d254da72b338ef668b468732303651a5ecbf992ea9575f30ca06355f2c2edc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 199, + 658, + 282 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/0bad4b8a9e257c2529bf84609f3ee26b5086573d0af51df270d4d92513151ff4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 199, + 836, + 282 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Input Text: \"Colorize it: a red car parked on a cobblestone street.\"", + "bbox": [ + 166, + 289, + 691, + 306 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/3520766d4bd31fe95534ccef2679e0d3237151e64a06cfbdbcc03285013be219.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 156, + 314, + 316, + 398 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/5daddf44b2febe0e6103c50d850ffed87ab4624a8da3303ae0a44eb83321b1ed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 326, + 315, + 488, + 398 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/82452cfd1770b08e14ee6fdce267d83915ea906cdd31384da9f70cf3e1cf38c3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 315, + 658, + 398 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/001d30c3201ae2f4664bf8ee499b66e8df811fc307c2c93884ed70982fa958ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 314, + 834, + 398 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Input Text: \"Colorize it: a couple of white and black kittens that are sitting in the purple grass.\"", + "bbox": [ + 166, + 402, + 800, + 435 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/e0ea300e9865c0a0b5c3f4003da7770db1b8cd75b5f648d2fb6667866807f1bb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 156, + 436, + 316, + 520 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": 
"images/2aea31980297b8bd9effa16d06efa9dd2a95f1e6a7b7a99dc48e7bb6e4ed6a27.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 326, + 436, + 486, + 520 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/cc99e1fe7082b4516a42bfa326f45cb5f3fae6ae0f81c321344103d47d80cbb9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 436, + 658, + 520 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/8f533a46531ffe4ae8d62f47eb7fc7615dbfab620a667fb4d1593863f2cb286f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 436, + 834, + 521 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Input Text: \"Colorize it: a red sports car parked on the side of a street.\"", + "bbox": [ + 166, + 523, + 735, + 537 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/d10a34e202b94e2de1a4f2d1cd215f5be631f1577e3a8ebeac90787c1852e8e9.jpg", + "image_caption": [ + "Figure 32: Task: Image colorization, aiming to add realistic and semantically consistent color to grayscale images based on textual prompts. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and CtrlColor [59], focusing on their ability to follow instructions and produce visually natural colorized outputs. Observations: CtrlColor performs the best overall, generating vivid and accurate colors that precisely match the prompts—such as green lips and yellow sunglasses in the last row, or the purple grass and kitten hues in the second. GPT-4o provides reasonably faithful colorization but often lacks richness or misinterprets tones (e.g., slightly dull red in the third row or inconsistent purple grass). Gemini 2.0 Flash is more vivid than GPT-4o but tends to oversaturate or produce stylized effects, especially on human features." 
+ ], + "image_footnote": [], + "bbox": [ + 161, + 542, + 320, + 666 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/ab3420504662b7f8244c018e079e1cffd82ab918262a3443ff4a8a16e52e66fe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 542, + 491, + 665 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/45547d6bbb7a37e719ea603008ab1f1389806043fdae0a8ed425ac866ed4b27c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 542, + 661, + 665 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/cec51b759ae2fd86f4a6cd9245c40a9af755b055698dd1da10fcbcf1565849ae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 542, + 839, + 665 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Input Text: \"Colorize it: a woman wearing a yellow sunglasses with green lips\"", + "bbox": [ + 169, + 667, + 769, + 685 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 189, + 693, + 287, + 709 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 377, + 693, + 434, + 705 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 514, + 693, + 640, + 705 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "CtrlColor", + "bbox": [ + 725, + 693, + 797, + 705 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Shadow Removal", + "text_level": 1, + "bbox": [ + 169, + 152, + 300, + 167 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/cfa4811e58b535cdfd35ac61baf40239638c41912743eed71869ca83aded9682.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 385, + 174, + 406, + 191 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Evaluation: Image Quality.", + "text_level": 1, + "bbox": [ + 408, + 178, + 622, + 194 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/e533493affd9962f40a067f07476309f50b38f805e9b7e4cb111bd1d65a85975.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 198, + 331, + 297 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/51cc0442254dbc7d3906e354e679e7c97663cf26955df12ad376ac2c249891a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 198, + 500, + 297 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/038bd8bcbb952c2d249e6b969e11edf6c615ea7d80b89e647965ce83d7b12973.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 198, + 676, + 297 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/5d3c5fa7d9c2e13ae59f463d54f3c900277b032d69c97c5dd725781b8fc54200.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 198, + 849, + 297 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/8280c76d3783095ac597ffe6112e8e97d4768a43158ffbeaed59287bec81116b.jpg", + "image_caption": [ + "Figure 33: Task: Shadow removal, aiming to eliminate harsh shadows while preserving the integrity of the scene, textures, and lighting balance. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and ShadowRefiner [25] to evaluate how well each method removes shadows and retains original object fidelity and lighting consistency. Observations: ShadowRefiner consistently achieves the most natural and effective shadow removal. 
It produces even, diffuse lighting across all scenes—e.g., softening shadows without distorting textures in complex scenes like the miniatures and dog portrait. Gemini 2.0 Flash removes shadows reasonably but occasionally leaves faint traces or flattens contrast, as seen in the second and fourth rows. GPT-4o shows stronger shadow reduction than Gemini 2.0 Flash but sometimes alters surface brightness or loses detail fidelity. ShadowRefiner best preserves the original color tones and textures while eliminating harsh shadows." + ], + "image_footnote": [], + "bbox": [ + 163, + 301, + 330, + 400 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/abb6c191cd8f3dbc58255f85b9eca1ff0ecbe31840d418b03345458fcd36ab20.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 301, + 504, + 401 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/898c8f34d0da327db20aba6653163e933c3b881344fb81df6b0f416dff0a65df.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 301, + 674, + 401 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/1bef728ebb6bc7f9c952867864380f4412a3ff879334ec345c1602238ad09c54.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 301, + 848, + 400 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/51b49e12056077fe1afb554b56beba3c707955dbdefb32d0bdf28a7d9683ab6a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 401, + 330, + 500 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/e53143ad29978bfb50a47d639d116221922ece9fa13749ad760e4f2ca7cbecbf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 333, + 401, + 504, + 501 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/aa0187c33d100a82b28f2505575ea9e419bbb4b364bc8feea7bf801423e56d3a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 401, + 674, + 501 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/e391d4c6d98af6fcc61c7f1d8e60d08040956f96e2c5d61ef9f3aac4d730f7a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 401, + 848, + 500 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/c159dda7b72f6548e3a35745859137667be43c3d3382e30061df21040584ebd3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 503, + 333, + 604 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/3fda08a0f5b089580c34cfd81ff50f99e0d4660564c4d9f7d8ce3de3d5c373c0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 503, + 504, + 604 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/af45afc3aa4a8340ccb4e22deeda2a924e0315faecedd77d56e667ae464a5ec2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 503, + 676, + 604 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/f7a549c39bdf0880255cebc34e0049aef8e949afe607ba2d7ca38743e5ee21e8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 503, + 846, + 604 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Input Text: \"Remove all harsh shadows from the image. Make the lighting even and soft across the entire scene. Preserve all objects, colors, and details exactly as they are. 
Make it look like it was taken under diffuse studio lighting.\"", + "bbox": [ + 171, + 609, + 826, + 657 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 199, + 672, + 300, + 691 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 380, + 674, + 439, + 688 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 522, + 674, + 651, + 688 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "ShadowRefiner", + "bbox": [ + 710, + 674, + 828, + 688 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Reflection Removal", + "text_level": 1, + "bbox": [ + 158, + 98, + 312, + 112 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/a92950a6ffd6c5949ed67dbcd8ef27f7a4455d1b77975ddb550332fdae784d04.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 126, + 393, + 145 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Evaluation: Image Quality.", + "text_level": 1, + "bbox": [ + 395, + 131, + 607, + 147 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/26dcc1fa3b38be01934c8e24ea493d1a80141258eb76a34f67d30c186588e554.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 168, + 156, + 321, + 253 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/8f1ec7ef5d643c0bfda5c6dcee55e55fbae700384aa8d8ff28e6f695f19abd73.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 338, + 157, + 491, + 255 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/e8c0506441fcc7c04d242543b53f3d7af01afae300d6edb7c5571fbe34319a8c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 157, + 661, + 256 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/fce6882ef70a409cc3043ec9739951fd8feb6eec25edb8d9c56a80dda6894b5f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 157, + 830, + 256 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Input Text: \"Remove window reflections, preserve interior details clearly visible through the glass, maintain natural lighting and perspective, photo-realistic result.\"", + "bbox": [ + 169, + 257, + 810, + 289 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/a39507e46b3ffd1bbdf0ff1d20f603e166fb2c83a3cb4b81c304a49bc56d055e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 168, + 292, + 321, + 391 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/d780d806748e7a2e2d245f4bb75612e3a332fab55a1cc6b0fa0ad203f17dd8b5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 292, + 490, + 391 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/293cc3e02dece56450be7a6542b0151c4dcffe5012347d53906f237826b4a55e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 508, + 292, + 661, + 391 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/a3941e54ef0c8ca433e936496f30948b94b4e9f8ef2115b981275376d66cf6b7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 292, + 830, + 391 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Input Text: \"Remove the reflection of buildings on the wet ground surface, make it look like a clean and dry textured concrete floor, realistic lighting and natural color tones.\"", + "bbox": [ + 158, + 
395, + 841, + 428 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/2997ae5bbb03c3903ad7d778e996f50f9bff8f0a1ab2e783b344083f30f3cce3.jpg", + "image_caption": [ + "Figure 34: Task: Reflection removal, aiming to eliminate unwanted reflections from transparent or reflective surfaces while preserving original content and realistic lighting. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and DSIT [39], assessing their ability to remove reflections while maintaining scene realism, texture fidelity, and lighting consistency. Observations: DSIT shows the most effective and natural reflection removal across all examples. It restores interior visibility through windows (e.g., bed and car interior) while preserving lighting and geometry. Gemini 2.0 Flash removes some reflections but often leaves faded traces or dulls textures, especially on glass doors and wet pavement. GPT-4o performs better than Gemini 2.0 Flash in preserving background details but sometimes alters color tones and sharpness. Overall, DSIT provides the cleanest and most photorealistic results, especially for transparent surfaces like glass and reflective wet ground." + ], + "image_footnote": [], + "bbox": [ + 168, + 431, + 321, + 545 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/145f3c5298c9786c810dc961b48e4e79d1a401610fd467498400ef8e5de81c24.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 431, + 490, + 545 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/517e9f2a30d20e6e67d1e7cc8d3275c188d4ba9fe1e3f526c5ea127a17812855.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 508, + 431, + 661, + 545 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/5d871e96ad190ac8245ba7e97f4356f76317189e597d17530c30c48b2ef31192.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 431, + 830, + 545 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Input Text: \"Remove reflections from the glass doors, make the interior clearly visible with natural lighting and sharp details, keep the golden door frame realistic and intact.\"", + "bbox": [ + 165, + 547, + 839, + 580 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/4d9a72541da86687fce106cdee64c185f7126d2f2e035a9623361561c2006136.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 169, + 585, + 321, + 681 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/06b68c2bd1d1e1e567e71454157f92909dcb91789d0b950ba4f61dd09198910d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 585, + 490, + 680 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/1f17bc39103c2c1bcbdf239933f68441984fad34a4c78c96991f6e9a246af36a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 508, + 585, + 661, + 680 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/6bf819c8a7dad12a034e9854583b927e93387ef6c95c36f44d5e4a7ebbd9eef8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 585, + 830, + 680 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Input Text: \"Remove reflections from the car window, make the interior of the vehicle clearly visible, preserve natural lighting and realistic textures, keep the car frame untouched.\"", + "bbox": [ + 166, + 683, + 810, + 729 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 191, + 741, + 290, + 757 + ], + "page_idx": 44 + }, + { + 
"type": "text", + "text": "GPT 40", + "bbox": [ + 380, + 741, + 437, + 755 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 521, + 739, + 650, + 755 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "DSIT", + "bbox": [ + 733, + 739, + 779, + 755 + ], + "page_idx": 44 + }, + { + "type": "page_number", + "text": "45", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Image Re-lightning", + "text_level": 1, + "bbox": [ + 215, + 101, + 349, + 116 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/1a9a2fe4848dccc2cbdc43a21645b7d9153e4a783d4634d91cd4b7240855ca79.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 403, + 118, + 423, + 133 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Evaluation: Light consistency.", + "text_level": 1, + "bbox": [ + 424, + 121, + 632, + 136 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/303aadcf1f413c53fe51a10f2bbdb1d2f38eaebbbf1bafbaf65caa46e66c8863.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 133, + 316, + 215 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/73a85fa3c615d1faa5d51ccafc720c7d1d72730add58f7b85b6f81378602781d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 325, + 136, + 428, + 215 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/89185a1d1d4373656d69efacb10271368adb3f6ab4aa29bb7ce10ccd11ba70a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 439, + 136, + 553, + 215 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/33aeab41543cb1f610d264106b611a2050ccfaaf21583d7c7034d3b1ef94d142.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 563, + 136, + 676, + 215 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/1186cfc667444853a5ea105221e7465b842983362ddd848dab0f81318dea818f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 686, + 136, + 787, + 215 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Input Text: \"Given two input images:", + "bbox": [ + 217, + 218, + 447, + 232 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Image 1: A classical marble statue in neutral lighting.", + "bbox": [ + 217, + 232, + 545, + 244 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Image 2: A city street at night illuminated by neon pink and blue lights.", + "bbox": [ + 217, + 244, + 656, + 257 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Please generate a relit version of the statue from Image 1, as if it were lit by the lighting conditions of Image 2.", + "bbox": [ + 217, + 256, + 772, + 280 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "The result should preserve the details and pose of the statue but apply realistic colored lighting and shadows consistent with the vibrant, mixed neon lighting of the second image.", + "bbox": [ + 215, + 279, + 772, + 306 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Light Map", + "bbox": [ + 227, + 309, + 292, + 323 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 334, + 310, + 413, + 323 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 473, + 310, + 519, + 321 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Gemini Pro 2.0", + "bbox": [ + 571, + 311, + 663, + 321 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": 
"IC-Light", + "bbox": [ + 712, + 311, + 767, + 324 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Text-Prompt Image Re-lightning", + "text_level": 1, + "bbox": [ + 209, + 340, + 429, + 356 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/dfe5bd0363834d7a517a03c99514ab82f92032ef90b648cf55ef38ce418a6054.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 212, + 369, + 339, + 489 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/7a43c02aec8d995221ba9a8b29fbfb292e2b680c4330dfae1451b1cbd1e28f15.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 356, + 369, + 483, + 488 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/8aee6376931ccae47da7774dcc80f87cb4655a39088d2495592ef2b55cc6e8ce.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 371, + 637, + 489 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/6b14e73a8dcd42c4119885d683c4b57a1f2394b2ba3d7cfed0c412d829af1eb7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 371, + 787, + 488 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Input Text: \"Sunlight through the blinds, near window blinds with a reasonable background.\"", + "bbox": [ + 217, + 489, + 756, + 517 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/9353609b07179915947a49bbe7cb97c69d989d42af40bfbeb65de7a2a7c68425.jpg", + "image_caption": [ + "Figure 35: Task: Image relighting, aiming to modify the lighting of a given image based on either a reference light map or a textual description, while preserving identity, texture, and spatial consistency. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and IC-Light [122] on two subtasks: reference-based and text-based relighting. Evaluations focus on lighting realism, directionality, shadow accuracy, and semantic preservation. Observations: IC-Light achieves the most realistic and consistent relighting across both tasks—accurately applying neon lighting from a reference image and generating sharp shadows and natural light from text prompts. Gemini 2.0 Flash preserves content well but produces softer, less directional lighting. GPT-4o offers more vivid lighting than Gemini 2.0 Flash but sometimes lacks shadow accuracy or background coherence." 
+ ], + "image_footnote": [], + "bbox": [ + 215, + 518, + 334, + 626 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/81995073d133c3b619b48c6e202b90c68116f067eb1b41f3fb9d692f8ddaef05.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 369, + 518, + 480, + 625 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/0bcf87aef23dd6d3d1cfe80c898e5921b661e040732d6eeef59708f811da4ad7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 513, + 518, + 635, + 625 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/1d6da19c3cbec9b2d439612a2a8553113f28a4397139ddfe38d9848ae7430cc6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 518, + 787, + 626 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Input Text: \"Sunlight from the left side, beach with a reasonable background.\"", + "bbox": [ + 220, + 628, + 761, + 643 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/39860355d35a8389c1208ff488e0389da02ec58d4919778ab96d9ee07712b2fd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 209, + 646, + 341, + 753 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/b5b70fc8304a56c9d8bae3967435e4b106ab643b441b2630a6c6e823636ba844.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 357, + 646, + 488, + 753 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/2f4b6a584e5347f2a2ffa4c55015f094171d881e87ced85fc577e90c71f0f4a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 646, + 637, + 753 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/9e208291de9a6b6fb7d4acda0172d03e668194c7eb8b35eb83dd89d00607f9ab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 646, + 787, + 753 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Input Text: \"Sunlight from the left side, beach with a reasonable background.\"", + "bbox": [ + 225, + 755, + 761, + 770 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 230, + 777, + 318, + 792 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 403, + 777, + 452, + 790 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 517, + 777, + 630, + 791 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "IC-Light", + "bbox": [ + 699, + 777, + 758, + 792 + ], + "page_idx": 45 + }, + { + "type": "page_number", + "text": "46", + "bbox": [ + 488, + 936, + 508, + 946 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "2.2.6 Spatial Control", + "text_level": 1, + "bbox": [ + 127, + 90, + 289, + 104 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Spatial control aims to generate visual outputs that not only reflect the content described in the prompt, but also precisely adhere to additional structural conditions (e.g., canny edge maps, depth maps, sketches, poses, and masks). This task evaluates a model's ability to faithfully align text guidance with visual constraints—an essential capability for real-world creative applications such as illustration, animation, digital content creation, and visual storytelling.", + "bbox": [ + 125, + 114, + 870, + 172 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "In this section, we examine GPT-4o's performance across five representative types of controllable conditions: canny, depth, sketch, pose, and mask. 
For each setting, we compare its outputs with those from Gemini 2.0 Flash [99] and a strong baseline method using ControlNet-based [121] diffusion backbones (FLUX.1-Dev [51], SDXL1.0 [82], SD3 Medium [27] or SD1.5 [90]). The results are illustrated in Figures 36, 37, 38, 39, 40.", + "bbox": [ + 125, + 176, + 870, + 233 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Overall, GPT-4o achieves performance that is on par with ControlNet-based methods in many cases, especially under common or moderately complex conditions. In particular, GPT-4o is capable of handling semantically rich or contextually complex prompts, where its strong foundation model understanding can help preserve both high-level semantics and visual plausibility. This is especially evident in tasks like pose-to-image or mask-to-image, where the structural signal may be sparse or ambiguous. However, GPT-4o's strong generative prior can sometimes lead to overly detailed or hallucinated elements, which compromises structural fidelity. For instance, in canny-to-image or depth-to-image tasks that require fine-grained geometric alignment, GPT-4o may deviate from the input layout more noticeably than traditional diffusion-based methods. In contrast, ControlNet exhibits more stable and accurate control in these low-level structure-guided scenarios, making it better suited for applications where spatial accuracy is critical. That said, ControlNet may struggle in more complex or open-ended cases, such as mask-to-image scenes involving multiple objects or interactions (e.g., aquariums with visitors and fish). In these scenarios, GPT-4o's strong cross-modal understanding partially compensates for its weaker control, offering plausible but not fully precise outputs. By comparison, Gemini 2.0 Flash lacks robust controllable generation capabilities across all evaluated control types. Its outputs often fail to match either the control condition or the textual prompt, reflecting limited capacity in multimodal alignment and structural grounding.", + "bbox": [ + 125, + 238, + 869, + 448 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "In summary, GPT-4o demonstrates performance comparable to SOTA methods in most cases, excelling in tasks that require rich semantic understanding and contextual complexity while maintaining a balance between high-level semantics and visual plausibility. 
Although it may exhibit structural deviations in tasks requiring precise geometric alignment, its strong generative prior gives it an advantage in handling complex or open-ended scenarios.", + "bbox": [ + 125, + 452, + 870, + 510 + ], + "page_idx": 46 + }, + { + "type": "page_number", + "text": "47", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Canny-to-Image", + "text_level": 1, + "bbox": [ + 169, + 150, + 294, + 167 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Evaluation: Controllability and text consistency.", + "text_level": 1, + "bbox": [ + 300, + 186, + 710, + 205 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/678977bfcc7731282c3cf0706d2a04a055df0dfc7950b35c45d46e6ffafa9252.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 277, + 183, + 297, + 202 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/34eb588af3ad0d16e975cf94c0165c9ea9cdbab78610d9c4fea8f37457c30f62.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 150, + 214, + 316, + 344 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/09b169752ee560ce282e0e189791bd22c980b5f6d260fd06d327d0bb93aa3fdc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 214, + 491, + 343 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/7ba7f124d58c88bee0135fc235b0aebd8b6426909a755ac1f40c313c7c9cad81.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 215, + 660, + 344 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/e2e8d2f4bc567a25d1d7f56d98489dc9897b06c00ba90d8a58c28ba70a1e2151.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 215, + 836, + 344 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Input Text: \"Follow the prompt and canny condition below to generate a controllable image. The prompt is: a cigarette with purple tobacco.\"", + "bbox": [ + 156, + 348, + 797, + 380 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/509dd28767c58df8dc35fefb0ee74154d0b9a3aadcb516526f252eee3fbd72ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 150, + 386, + 316, + 515 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/01c729abfdd7ba1065b518b5f8fac38df17a322d899f16a92e21606ffe75be31.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 386, + 491, + 515 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/572feead6e536b7524eb2f723ff12841c3e537418651f45988dbcb4312766cf2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 386, + 663, + 515 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/9b39db21bfca83d6660c37f7265a41bd9d7830647b6fcbdaafa3cc14372c9c6e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 386, + 836, + 515 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Input Text: \"Follow the prompt and canny condition below to generate a controllable image. The prompt is: a traffic sign with red cross written on it.\"", + "bbox": [ + 156, + 521, + 797, + 553 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/582236643d6a53099264228610a5ef869dd1b77a4e837887915e57f35aec63bc.jpg", + "image_caption": [ + "Figure 36: Task: Canny-to-Image generation. The goal is to generate prompt-aligned images guided by canny maps. 
Setup: Each row shows an input canny map and a text prompt, with outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Dev w. ControlNet [51]. Observations: GPT-4o performs worse than FLUX.1-Dev [51] in structural fidelity, often introducing additional visual details that deviate from the input edge map. However, it produces more semantically aligned and aesthetically pleasing results overall. Compared to Gemini 2.0 Flash, GPT-4o significantly outperforms it in both structure preservation and prompt consistency." + ], + "image_footnote": [], + "bbox": [ + 150, + 560, + 316, + 689 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/2bc479794ab27b52ddd075f3f33d1e15a260aae2e169b634378ccdde014ad491.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 560, + 491, + 688 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/d328a02168204bffd240fc039c323fc7a9152c7db8d314980163eda7d00ecf5d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 560, + 661, + 688 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/65ad8232e82ff1d205196c6cd60b284ad9f6d0f84803d62c20199cec22907447.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 668, + 560, + 836, + 688 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Input Text: \"Follow the prompt and canny condition below to generate a controllable image. The prompt is: oil painting of geese flying in a v formation over a pond at sunset.\"", + "bbox": [ + 156, + 691, + 820, + 723 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 187, + 731, + 285, + 747 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 380, + 732, + 436, + 744 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 516, + 732, + 640, + 744 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "FLUX.1-Dev w. ControlNet", + "bbox": [ + 694, + 726, + 800, + 753 + ], + "page_idx": 47 + }, + { + "type": "page_number", + "text": "48", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Depth-to-Image", + "text_level": 1, + "bbox": [ + 169, + 156, + 295, + 172 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Evaluation: Controllability and text consistency.", + "text_level": 1, + "bbox": [ + 300, + 188, + 710, + 205 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/74777175c863d9f366b45a7843d8eb9164d1d6ccee90d1c36c565abd5a6cd450.jpg", + "image_caption": [ + "Input Text: \"Follow the prompt and depth condition below to generate a controllable image. 
The prompt is: a wooden bridge that has fallen down in the grass.\"" + ], + "image_footnote": [], + "bbox": [ + 148, + 215, + 316, + 345 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/4269989ea478ceccbce68d7fb7a7ad5b3bda0673dc3e29b9f2ccd7951d3cd19a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 215, + 490, + 345 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/c4a439744d0603cca113101ddccdd39ff6f86154f75db50db717e2e21c43d890.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 215, + 663, + 345 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/c34d4877edd4f942f60dcc2c4527c7ca7b8db7854520d882872d6a05a6e0c924.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 215, + 836, + 345 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/18cac760bb10b2890aca59d644e60712db22b56fe52a9734a701f01dc680756b.jpg", + "image_caption": [ + "Input Text: \"Follow the prompt and depth condition below to generate a controllable image. The prompt is: a 3d image of a stone building with plants and rocks.\"" + ], + "image_footnote": [], + "bbox": [ + 150, + 383, + 316, + 513 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/d96c269179392a477b20cac488bf504f948497d76d9712d151212c0dc9a0c84f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 383, + 491, + 513 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/033fe2dbc7534a41ab6e0d52eae6d42fbd80b6b350bb3fbca8b041c43fc41b40.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 383, + 663, + 515 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/46b577cbc8feb431b513caaf0fa800f1004a3c0cb4738f315c72a97f38bb97c4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 383, + 838, + 515 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/0d51b383aa6e1f47aa6ac0e33d09504e867f5f452a0b912bdf9c993b4e2622b3.jpg", + "image_caption": [ + "Input Text: \"Follow the prompt and depth condition below to generate a controllable image. The prompt is: a red pillow on a chair.\"" + ], + "image_footnote": [], + "bbox": [ + 150, + 553, + 316, + 641 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/46017199672cdc547f9700dfe377960c656d99e3ef6cc597d38c0dbafddd087b.jpg", + "image_caption": [ + "Figure 37: Task: Depth-to-image generation, aiming to synthesize controllable and visually coherent images based on a text prompt and a given depth map. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and FLUX.1-Dev w. ControlNet [51], focusing on controllability, text-prompt alignment, and the visual quality of generated scenes. Observations: GPT-4o generates visually appealing and stylistically consistent images that align reasonably with text and depth cues—such as the bridge scene and stone ruins with rich lighting and artistic tone. However, its controllability is weaker than FLUX.1-Dev w. ControlNet [51], which shows more precise depth alignment and object placement, as seen in the accurate layout of the bridge and red pillow. GPT-4o leans toward stylized coherence, while FLUX emphasizes photorealism with sharper spatial fidelity. Gemini 2.0 Flash lags behind both, often showing depth misalignment, shape distortion, and weaker semantic grounding." 
+ ], + "image_footnote": [], + "bbox": [ + 321, + 553, + 490, + 641 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/90148daf477655ba5438aee9571917bb7d337f466c861465128cb7f0a6616246.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 553, + 663, + 641 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/c2e5c471cced0e2bc19cbb418990047f6502461f9ab068015212292ab802d155.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 553, + 836, + 641 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 186, + 684, + 284, + 700 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 377, + 684, + 433, + 698 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 514, + 684, + 640, + 698 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "FLUX.1-Dev w. ControlNet", + "bbox": [ + 692, + 674, + 800, + 704 + ], + "page_idx": 48 + }, + { + "type": "page_number", + "text": "49", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Sketch-to-Image", + "text_level": 1, + "bbox": [ + 163, + 125, + 299, + 140 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/ca0e9e3e4ad44d1a4998e2e53190039767bd351173e262b703bb663bf16e4dd2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 276, + 157, + 297, + 176 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Evaluation: Controllability and text consistency.", + "text_level": 1, + "bbox": [ + 302, + 162, + 710, + 180 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/b553f7bab31ff9efef4880bc6749cc3b0972fb954926e7cddb979fe4fa86b1e4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 148, + 189, + 316, + 319 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/e37560da746ee64c9f4dbda99f18792081354ee1290d5fb107e6b17833cab89b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 189, + 491, + 319 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/774a2f8158ca42b89d06121387988ac2fa21eab2708a59d2a40c47aeeefeea74.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 189, + 663, + 319 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/9b25e051c800568d87dbf568b975a694ac1831be64dac4fe52e936e4cbe78e4f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 189, + 836, + 319 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Input Text: \"Follow the prompt and sketch condition below to generate a controllable image. The prompt is: A small giraffe eating grass.\"", + "bbox": [ + 156, + 323, + 805, + 354 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/76b3ffacb274a3c770ba9db5c2fcea3609020e8b054535fa7f050994e7337844.jpg", + "image_caption": [ + "Figure 38: Task: Sketch-to-image generation, which requires translating rough line drawings into realistic and semantically accurate images guided by text prompts. Setup: We evaluate GPT-4o against Gemini 2.0 Flash [99] and SDXL1.0 w. ControlNet [82], focusing on how well each model respects the provided sketch while reflecting the described content. 
Observations: GPT-4o excels at generating lifelike scenes that match the prompt, often delivering visually pleasing and contextually grounded outputs—like the natural posture and setting of the giraffe or the dynamic movement in the parachute example. However, it tends to soften or reinterpret sketch lines, leading to slight mismatches in fine structure. In contrast, SDXL1.0 w. ControlNet [82] offers stronger adherence to the input sketch, capturing geometric details more accurately (e.g., fan blades and figure outlines), albeit with slightly more synthetic textures. Gemini 2.0 Flash shows limited understanding of both sketch and prompt, often producing less realistic or structurally off-target images." + ], + "image_footnote": [], + "bbox": [ + 145, + 359, + 316, + 488 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/5bbe5e94282bd4bc533b7d94b749decb59742f5aee7bcb68887dcce921c8b324.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 320, + 359, + 490, + 488 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/c3fac1a22608550f62bd5e74998a43c8ae3cd590d4dfa1647701dea7d93c7ce8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 359, + 663, + 488 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/b2861bea0423cee774150f0887bdb41c652b436f4eada06228dfb7f7f53078b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 359, + 836, + 488 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Input Text: \"Follow the prompt and sketch condition below to generate a controllable image. The prompt is: A red metal electric fan.\"", + "bbox": [ + 155, + 493, + 805, + 523 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/6e8cda0469a84878dd4813f102a0c8e09c7e5e7f7b3d81edb91ab19c428a22a1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 527, + 316, + 656 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/b143d496a975b31c007201551ab2858b5f989539fd28932d11bf8433f8fa182f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 320, + 527, + 490, + 656 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/ecbf422861a6f3acb1bf2fa0c11269caa5fa86090bdbdd18230b1dcaaeb183d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 527, + 663, + 656 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/e7467303dc6f9aead4b04680ab549cd4c9987da28a7b8e07f93928c3cc115828.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 527, + 836, + 656 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Input Text: \"Follow the prompt and sketch condition below to generate a controllable image. The prompt is: a man holding on to the strings of a flying parachute.\"", + "bbox": [ + 155, + 659, + 805, + 691 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 186, + 696, + 284, + 713 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 377, + 696, + 433, + 710 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 517, + 698, + 640, + 712 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "SDXL1.0", + "bbox": [ + 720, + 693, + 787, + 705 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "w. 
ControlNet", + "bbox": [ + 699, + 707, + 805, + 720 + ], + "page_idx": 49 + }, + { + "type": "page_number", + "text": "50", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/4f2a82196d91323ac6987c501fe192b87a37b8c7bd49cfefc255b54f649c96f1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 277, + 148, + 300, + 169 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Evaluation: Controllability and text consistency.", + "text_level": 1, + "bbox": [ + 302, + 154, + 710, + 172 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/ef305485daf2aa15d9580d3579fba6ec68a2f97825b971e73b7dda80fc7b27d3.jpg", + "image_caption": [], + "image_footnote": [ + "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. The prompt is: Quarterly in a blue and white jersey with number 14, preparing to throw a football during a game.\"" + ], + "bbox": [ + 150, + 176, + 315, + 277 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/3015117dffd29de40979d63938f43194ed356ec24b893f0eca89a8e4d2397504.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 320, + 176, + 490, + 277 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/49309cffd79e60cb0270f525f65a35ec7a1dda136d91a8690b5a4235fa3ad428.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 176, + 665, + 279 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/4f0a2ccea09917b8073951d104a30402dee27f0a79f45727ae62f4aca6252dac.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 176, + 836, + 279 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/1529d55096562ea1287a6e80927c823cf3ef85f6ff2fc349534df8add404dfe4.jpg", + "image_caption": [], + "image_footnote": [ + "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. The prompt is : A young woman with long brown hair, wearing a blue strapless dress and a black necklace with a butterfly pendant, poses against a beige background.\"" + ], + "bbox": [ + 150, + 330, + 316, + 476 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/cbc8f5122209a93c7fcd5c01f787a5328fd12da73ba30b8fe0fa36d8089f19ee.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 320, + 330, + 491, + 476 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/b6efdeb3733117ab0264a27bea6c1da70b95aac300ccfe78cc7e7eaf29cc41bc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 329, + 663, + 476 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/386ab41549ccbbdcb6953f2cfb0c22f29b59c77cb08a11e2d82aaf042ab0a3a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 329, + 836, + 476 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/12d972a127e37a6219e4bd892eadd138cfbb736fb6916e3a8ea8c8cccd698774.jpg", + "image_caption": [], + "image_footnote": [ + "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. 
The prompt is: A woman is performing a pull-up exercise on a gym rack.\"" + ], + "image_footnote": [], + "bbox": [ + 150, + 531, + 315, + 685 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/eff06e3cb3442fb9e89cec40098e50870a9ded39e1f69753076da696882aa86d.jpg", + "image_caption": [], + "image_footnote": [ + "Figure 39: Task: Pose-to-image generation, aiming to synthesize realistic images that reflect both the human pose and descriptive prompt. Setup: We benchmark GPT-4o against Gemini 2.0 Flash [99] and SD3 Medium w. ControlNet [27], evaluating their ability to follow pose conditions while generating semantically accurate and coherent images. Observations: GPT-4o performs well in complex scenes—such as the football example—where it effectively integrates pose, clothing, and background with strong realism, contextual accuracy, and pose accuracy. In simpler cases like the pull-up exercise, it shows occasional pose drift, especially in limbs. SD3 Medium w. ControlNet [27] offers better pose fidelity overall, though its visual quality can be inconsistent. Gemini 2.0 Flash underperforms in both structure and coherence, often generating anatomically incorrect or visually weak results. Overall, GPT-4o balances text understanding and generation quality, especially in detailed prompts." + ], + "bbox": [ + 320, + 531, + 488, + 685 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/c9e2f0970eb2e881c87f2518165ec8fafd6765bc14d8e17f59829268ad6411c8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 531, + 665, + 685 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/b7302a93838bd0514ce6b4544837c9f5854b7f09d9a31b036a582c5735883900.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 531, + 836, + 685 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 186, + 729, + 284, + 746 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 375, + 729, + 433, + 743 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 516, + 728, + 640, + 741 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "SD3 Medium w. 
ControlNet", + "bbox": [ + 705, + 719, + 810, + 750 + ], + "page_idx": 50 + }, + { + "type": "header", + "text": "Pose-to-Image", + "bbox": [ + 174, + 117, + 290, + 133 + ], + "page_idx": 50 + }, + { + "type": "page_number", + "text": "51", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Mask-to-Image", + "bbox": [ + 169, + 106, + 292, + 123 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/3395f7e5893f7029588b9d5b16f3549b7c5baa04869e047e731f2d72c81f6981.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 277, + 133, + 300, + 152 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Evaluation: Controllability and text consistency.", + "text_level": 1, + "bbox": [ + 300, + 138, + 712, + 156 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/3b637fcfa8060bbf723d957b7f4a1aa49a5f9d87c744993d0514d9873e23989e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 148, + 165, + 316, + 294 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/8175a842bf2f363a8a70eb3f0550f36e400f3fdd4bd3f80909fb7879a8e130c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 320, + 166, + 490, + 295 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/6e8d8fa6a7fc992e352dc0100c93543a6693728f8a6fe64aac51f54c477a6736.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 166, + 663, + 295 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/494f4bc560feb0a0c69c549d9e94d552cb214aa34f2e532c73c99c615ff3a691.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 166, + 836, + 295 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. The prompt is : A peaceful indoor church scene with a plain wall, stained glass windows, a wooden podium, and a stone altar under soft sunlight.\"", + "bbox": [ + 155, + 299, + 797, + 345 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/8d843e8b140fd3c6f219b02b54f981e68e8501565fcef5774b45f8e54c5de112.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 150, + 349, + 316, + 477 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/e9707ee40f1e21949b575b798c93018191eaeb0953a7978ef48d77face6376a1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 320, + 348, + 488, + 476 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/41fa18b12458faa6a8dc00ecb9ddebef31263254df2985d4bd17d39f0aee636c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 348, + 663, + 476 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/fd41fad2fe31ea862cb86cef72fd1fa75256598dab78c33edf42129c6eb8f4a1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 348, + 836, + 476 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. The prompt is : An indoor aquarium scene with a large fish tank full of colorful tropical fish swimming. The fish tank is surrounded by walls and has a visible floor at the bottom. 
The environment is bright and underwater-themed.\"", + "bbox": [ + 155, + 482, + 830, + 542 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/32972f2863a4ed568d406e940ec73fb2acb89d05a9ce88264831494aca2909d5.jpg", + "image_caption": [ + "Figure 40: Task: Mask-to-image generation, which requires translating semantic segmentation maps and textual prompts into coherent and realistic images. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and SD1.5 w. ControlNet [90], focusing on their ability to combine spatial layout from the mask with deeper scene understanding from the prompt. Observations: Compared to previous control tasks, this setting demands more from the model in terms of semantic reasoning and compositional understanding. GPT-4o excels in this regard, producing visually consistent scenes that align with the prompt's intent—such as the serene church interior and the immersive aquarium setting with visitors. However, in fine-grained spatial control, especially with small or tightly shaped objects like tropical fish, SD1.5 w. ControlNet [90] performs better in preserving shape and positioning. Gemini 2.0 Flash continues to struggle in both fidelity and adherence to masks, often missing key scene elements or producing oversimplified outputs." + ], + "image_footnote": [], + "bbox": [ + 148, + 545, + 316, + 674 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/97eac775ab7fe9404225b9ec41d4b00866061d69df88ce9adc4b8d365ba02992.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 320, + 546, + 488, + 672 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/de15cb94a339b40e3ca5dcb35e27521f86d056376dba353e39d9dfc849b5350a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 546, + 663, + 672 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/c99eb6ee2f33a13babe23a2160bddcbb33b49dbaaa63c5c80ad3c9928ada400c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 546, + 836, + 674 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Input Text: \"Follow the prompt and mask condition below to generate a controllable image. The prompt is: An indoor aquarium with a large fish tank and colorful tropical fish, with a few visitors in the scene.\"", + "bbox": [ + 155, + 676, + 833, + 720 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 186, + 727, + 285, + 744 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "GPT40", + "bbox": [ + 377, + 727, + 434, + 742 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 516, + 727, + 642, + 742 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "SD w. ControlNet", + "bbox": [ + 684, + 727, + 818, + 742 + ], + "page_idx": 51 + }, + { + "type": "page_number", + "text": "52", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "2.2.7 Camera Control", + "text_level": 1, + "bbox": [ + 127, + 90, + 295, + 104 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Although recent visual generative models demonstrate remarkable capabilities in creating high-quality images, generating images with specific camera settings (e.g., bokeh blur parameters, focal length, shutter speed, color temperature) and making further adjustments remains a challenging task. 
We further explore GPT-4o's performance in camera control, evaluating its ability to generate images with desired photographic parameters in text instructions. This task is particularly significant as it bridges the gap between artistic creativity and technical precision, enabling users to simulate professional photography techniques and achieve greater control over the visual output. Such advancements have broad applications in fields like photography, cinematography, and visual design.", + "bbox": [ + 125, + 114, + 870, + 212 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Specifically, we collect text prompts from [118], and compare GPT-4o and Gemini 2.0 Flash [99] with Generative Photography (GP) [118]. The results are reported in Figures 41, 42. We can observe that GPT-4o achieves decent results in controlling bokeh blur parameters and color temperature, demonstrating its strong generalizability to various photographic settings. However, it still falls short in adjusting focal length and shutter speed, occasionally leading to inconsistent visual semantics or incorrect visual effects. By comparison, Gemini 2.0 Flash struggles significantly across all camera control scenarios, failing to produce coherent or accurate outputs that align with the specified photographic parameters, highlighting its limited capability in this domain.", + "bbox": [ + 125, + 218, + 867, + 316 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "In this task, GPT-4o shows promising potential in camera control, outperforming Gemini 2.0 Flash and achieving competitive results in certain aspects. Nonetheless, there remains room for improvement in handling more complex adjustments, which could further enhance its applicability in professional photography and creative industries.", + "bbox": [ + 125, + 321, + 867, + 364 + ], + "page_idx": 52 + }, + { + "type": "page_number", + "text": "53", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Camera Control", + "text_level": 1, + "bbox": [ + 174, + 148, + 295, + 162 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/541421ed5de9f82a9202335ca7678323b87627971bf9397ec165d6899894a416.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 215, + 176, + 236, + 195 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Evaluation: Camera setting adjustment, semantic consistency.", + "text_level": 1, + "bbox": [ + 240, + 180, + 772, + 198 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 151, + 232, + 207, + 244 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Gemini", + "bbox": [ + 148, + 284, + 202, + 297 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "2.0 Flash", + "bbox": [ + 140, + 301, + 210, + 315 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "GP", + "bbox": [ + 163, + 359, + 186, + 373 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/57830b99a43f06f49ba04d1b2f5ded94ea4df8175531c29f23aca0f89a53b83b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 214, + 203, + 333, + 266 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/91df94d12a7739436d063cd8168ea4e4d26c88f28f02f9407f230adabe8d29e6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 214, + 267, + 333, + 330 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/0ec6090994cbd2295c92ceaabd8108bda9fc1d44cff795793c63c2f03a6f1419.jpg", + "image_caption": [ + 
"28.0" + ], + "image_footnote": [], + "bbox": [ + 214, + 330, + 333, + 395 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/f69e3b0379013765f2adbc1aca797b0298039407cd2b3e804055f8aa7b5d7129.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 203, + 455, + 266 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/e06a1d22dd6093751188bb61a4da58c22606454753b4f5ef255503039c23b4d9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 267, + 455, + 329 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/c037197eb3df793fe54ce69be58db82135ee54f307b277b1651e4aea252edd89.jpg", + "image_caption": [ + "14.0" + ], + "image_footnote": [], + "bbox": [ + 336, + 330, + 455, + 395 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/7617257b54d51121e2b378531c54403d5f093b828cb72764f28cbaf8e2d75466.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 460, + 203, + 578, + 266 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/cbefa33dacb0fcc87c8295a83dda71231a6f564b3f6a39b28e9b44a76d014c28.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 460, + 267, + 578, + 329 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/0abff9671077ea8920b3cfaa655376afa170146e8731c668c911bd36367fe676.jpg", + "image_caption": [ + "10.0" + ], + "image_footnote": [], + "bbox": [ + 460, + 330, + 578, + 395 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/3f74eb072bab2b800151c5b569b4d234ff14848a96ea3fa56d91a676cf3c70bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 583, + 203, + 702, + 266 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/3fe16c86a896d908033e9f78ddd28e8ff19dae54bfcbf58eef865c9f9147a115.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 583, + 267, + 700, + 329 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/e138171044c5ed197f98e471ee68b0a7988c369038ddde3296f693594e3ed2c6.jpg", + "image_caption": [ + "6.0" + ], + "image_footnote": [], + "bbox": [ + 583, + 330, + 702, + 395 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/4ae472a9c88dd869cf1cee61a0638cf7a434e0355bfd8577ac37c979ef8d008a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 203, + 826, + 266 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/02202d497506f20a18941658d169deee92ee7a8b17e62ab0a67e4f38d7fc3f81.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 267, + 825, + 329 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/359582282a17e20740a8e878147c496925396e8e780d4f8429b70706e235f66f.jpg", + "image_caption": [ + "2.0" + ], + "image_footnote": [], + "bbox": [ + 705, + 330, + 826, + 395 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 153, + 497, + 209, + 511 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Gemini", + "bbox": [ + 151, + 553, + 202, + 566 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "2.0 Flash", + "bbox": [ + 143, + 571, + 210, + 584 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "GP", + "bbox": [ + 168, + 625, + 189, + 637 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/6ac4b0f6100f66c6e3a3675e85edf101192eaa5e7afac67acbc73dad433f98df.jpg", + "image_caption": [ + "Input Text: \"A horse with a white face stands in a grassy field, looking at the 
camera; with bokeh blur parameter *\" & \"Adjust the bokeh blur parameter to *\" (* indicates a specific value)." + ], + "image_footnote": [], + "bbox": [ + 217, + 469, + 334, + 532 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/f13c64cd986d4479510b7a4819c3393ff74c88928cb3d8ab9642dde087405fb8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 217, + 534, + 334, + 595 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/8ba638159d6af8b00306bfa770db1930683a70fb3c84b43764507be8b7b6f124.jpg", + "image_caption": [ + "24.9" + ], + "image_footnote": [], + "bbox": [ + 217, + 597, + 334, + 659 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/032dd6d95007fdd7dba51b274352d33e5c899e1f0a93a08947da88f005ed8904.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 470, + 455, + 532 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/32ffc1eb96999685c0b89d169c9aa5316abd2e06ff28b61ba0beef011cd7d160.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 534, + 455, + 595 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/6f7393447d4c0dd9cc43b403fb767ccae61d13073cbf4e7b6de1e3bbffbc5cfd.jpg", + "image_caption": [ + "36.9" + ], + "image_footnote": [], + "bbox": [ + 339, + 597, + 457, + 659 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/e9ec594631498e2a1f8e661bedae31c443f9b190e1be8f521a61188f4d8f033b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 460, + 470, + 578, + 532 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/d489e64ba5236d20a8ccd8750097eec10c9d7fb5eb57f9ae796871bfa2203c22.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 460, + 534, + 578, + 595 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/7c825dcf01a94678b59a1ac7f191122428497b7adf966110443aeb1232b68996.jpg", + "image_caption": [ + "48.9", + "Input Text: \"A beautiful garden filled with red roses and green leaves; with * mm lens\" & \"Adjust the lens to * mm\"." + ], + "image_footnote": [], + "bbox": [ + 460, + 597, + 580, + 659 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/7b779ff8fb58b3aae04af13166a3b1967efcf87892bd378886be402ea8763601.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 584, + 470, + 702, + 532 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/889fe3842b7457a957d4f92c06ccec58df898a9c56de1a94074c190c063efac7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 584, + 534, + 702, + 595 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/b5bfd8b85c6c3bc556cebefab62a44ff37c4d3e8e69240fa38a6ac13c7cc168b.jpg", + "image_caption": [ + "60.9", + "Figure 41: Task: Camera control. The goal is to generate images aligned with specific photographic parameters, such as bokeh blur, focal length, shutter speed, and color temperature. Setup: Results are based on text prompts collected from [118], comparing outputs from GPT-4o, Gemini 2.0 Flash [99], and Generative Photography (GP) [118]. Each row includes the input text instructions and corresponding outputs. Observations: GPT-4o demonstrates strong performance in controlling bokeh blur, producing visually appealing and parameter-aligned results. However, it shows limitations in handling focal length, occasionally generating inconsistent or less accurate outputs. 
By contrast, Gemini 2.0 Flash struggles significantly in both aspects, often failing to produce coherent results. Overall, GPT-4o achieves better performance in this task but still requires further refinement to enhance focal length control." + ], + "image_footnote": [], + "bbox": [ + 584, + 597, + 702, + 659 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/099e4ec78e062250f7bdab074cc4518ca8ed428e187a0bb348ddc4b8e86c87a1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 470, + 826, + 532 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/74da7612dd6d92ecd657e1f44bdbfc6138a44b8f5bad9b26166f4cd12e4bcb65.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 534, + 826, + 595 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/e3c6465c6ac99b5cac8b7b87979c048e1ec6ea06f3001201cdafef489c4470e1.jpg", + "image_caption": [ + "69.9" + ], + "image_footnote": [], + "bbox": [ + 705, + 597, + 826, + 659 + ], + "page_idx": 53 + }, + { + "type": "page_number", + "text": "54", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Camera Control", + "text_level": 1, + "bbox": [ + 174, + 152, + 295, + 166 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Evaluation: Camera setting adjustment, semantic consistency.", + "text_level": 1, + "bbox": [ + 241, + 181, + 772, + 200 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 153, + 231, + 209, + 243 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/b0d136fb714a9be563b2f3204d0e0bad1bfffa49452c0d77a5bdfb3a14e364d0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 217, + 207, + 334, + 268 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/6353301ea1a4bd02c64e31effbe95a706a80640c23bedd6dff2583457c4933e0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 207, + 457, + 268 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/f748ae519d07deeba518763dcddb8fac9d6dd17225e417618ff62357c72bb0c3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 462, + 207, + 578, + 268 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/a145180a5acb6bef4bb07f8a9b725b80a966f60fdc5991366b23f4cc7d100d3d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 583, + 207, + 702, + 268 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/d215069ec65dc0f111efbae6b16c00981d2ebba9711197049d3d10d379a5fe6f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 207, + 825, + 268 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 147, + 287, + 210, + 320 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/34364cd752d523f7253e2bed26e26e7bb958064d8b44d8df3b0c491d05214cb2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 217, + 268, + 334, + 332 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/8107bc9ed92dde4a8075837a95dd9e6711b30dd5e1a18eb7914511ac730f010f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 268, + 457, + 332 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/e8ab46f36bede74d3eb3841233ac4c2a6eb5643b1d97d4921f99dd2c221aef40.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 462, + 268, + 580, + 332 + ], + "page_idx": 54 + }, + { + "type": "image", + 
"img_path": "images/a26d98ea46dba3acd2775aa3134da02bf0e703e9b8a805247053fdfe1b12d8bc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 584, + 268, + 702, + 332 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/caed113ea41a0bbb54c520a911d0e22a31ace6434333320e221dd66e5f86ef33.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 268, + 825, + 332 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "GP", + "bbox": [ + 171, + 359, + 192, + 373 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/9d2ccc2dc8ada5802d342f8533d6e9a320919119569352e82afb4bf3500698e0.jpg", + "image_caption": [ + "0.88" + ], + "image_footnote": [], + "bbox": [ + 217, + 333, + 334, + 396 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/dd17353d44f66bbb4b35301466dd1088e56a6dbb759f8d6e1a798e291070cdb4.jpg", + "image_caption": [ + "0.68" + ], + "image_footnote": [], + "bbox": [ + 341, + 333, + 457, + 396 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/c65293d5487451040c046c09049d817ac48c8d7e2a82e633e0d317f853e37512.jpg", + "image_caption": [ + "0.48" + ], + "image_footnote": [], + "bbox": [ + 462, + 333, + 580, + 396 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/1a0a8291fc0eb54d480890af32530069802f1d52bc82f8da414ec15032e47f7b.jpg", + "image_caption": [ + "0.38" + ], + "image_footnote": [], + "bbox": [ + 584, + 333, + 702, + 396 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/0b0cfcb2c756d34ecf6d719996e6b93b05c237d3b9298e8a45b297f87db15d04.jpg", + "image_caption": [ + "0.28" + ], + "image_footnote": [], + "bbox": [ + 705, + 333, + 825, + 396 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 153, + 483, + 209, + 496 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/2f8ba2525f180355e9429c8aa78eab3fdc5e1c1c77db35dd88729c6608f735e8.jpg", + "image_caption": [ + "Input Text: \"A blue pot with a plant in it is placed on a window sill, surrounded by other potted plants; with shutter speed * second\" & \"Adjust the shutter speed to * second\"." 
+ ], + "image_footnote": [], + "bbox": [ + 217, + 458, + 334, + 520 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/e2bafc2e24743ad788bd64f544fbd0056daacff8e5ccf84bf8a7e7d5bd57b702.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 458, + 457, + 520 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/65a3cacd9307277d2c15e32c391395ee4d10278b0cde3e7007004223bb26c6c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 462, + 458, + 580, + 520 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/4851ec2102693629a48d4c59a074117a859cc53c4e7a1a739b2b4623d6768712.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 584, + 458, + 702, + 520 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/c55187e3cf1b46125c32de8fa1ced6d268639035921d9b205dcf8ee53eaef80f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 458, + 826, + 520 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 147, + 539, + 214, + 571 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/fe6974e0162720276dc4d0b553ff8e8fdee18edf7ebaa81e761e6d01fa924c91.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 217, + 522, + 334, + 582 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/c30bd99d0cf615babc90bf1be839dd3875329913446e1f8f05070addcf8105c3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 523, + 457, + 582 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/de07423ffdd0dc744cf6f4c7b38144d08ecc3112098ef6d6efbfb06253fb9def.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 462, + 522, + 580, + 582 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/9df5012deab1c75c8cc3c9b199f8d51adaf06952378b5293634b0a061a791b0f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 584, + 523, + 702, + 582 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/76238724fe5435d83934f4d9ceaa07fd016a8c9970f6377cd6eb7afa1409a5d4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 523, + 826, + 582 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "GP", + "bbox": [ + 171, + 611, + 192, + 623 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/077315bfabdff852ee00ffcd33f6dc6141773a534acf687e5c1e7ad5284a7a19.jpg", + "image_caption": [ + "3100.0" + ], + "image_footnote": [], + "bbox": [ + 217, + 583, + 334, + 645 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/334e57c72d615f0c16cf7f289ed3e1236adf82ed7e69d06552fc24a1910711db.jpg", + "image_caption": [ + "4000.0" + ], + "image_footnote": [], + "bbox": [ + 339, + 583, + 457, + 645 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/838fcf545b25f35bfeec3bef84c6f0c0db6ecfcb9f88eaec03caefa49c2b0b1c.jpg", + "image_caption": [ + "8000.0", + "Input Text: \"A collection of trash cans and a potted plant are seen in the image. The trash cans are individually in blue, black and yellow; with temperature * kelvin\" & \"Adjust the temperature to * kelvin\"." + ], + "image_footnote": [], + "bbox": [ + 462, + 583, + 580, + 645 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/3569ee4edc461e7da1c8372a4a522d92cca61c99b430368708a6b7dc82703262.jpg", + "image_caption": [ + "7000.0", + "Figure 42: Task: Camera control. 
The goal is to generate images aligned with specific photographic parameters, such as bokeh blur, focal length, shutter speed, and color temperature. Setup: Results are based on text prompts collected from [118], comparing outputs from GPT-4o, Gemini 2.0 Flash [99], and Generative Photography (GP) [118]. Each row includes the input text instructions and corresponding outputs. Observations: GPT-4o demonstrates strong performance in controlling color temperature, producing coherent and visually accurate results. However, it struggles with shutter speed, occasionally resulting in inconsistent or unrealistic motion effects. In contrast, Gemini 2.0 Flash fails to consistently handle either parameter, often producing outputs that lack alignment with the desired settings. Overall, GPT-4o outperforms Gemini 2.0 Flash in this task, but further improvements are needed for precise shutter speed control." + ], + "image_footnote": [], + "bbox": [ + 584, + 583, + 702, + 645 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/a99706fb05eb2a9239f7691575789b91486fcd20c7175b2b5dd9b1fbd088f850.jpg", + "image_caption": [ + "3000.0" + ], + "image_footnote": [], + "bbox": [ + 707, + 583, + 826, + 645 + ], + "page_idx": 54 + }, + { + "type": "page_number", + "text": "55", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "2.2.8 In-context Visual Prompting", + "text_level": 1, + "bbox": [ + 127, + 90, + 380, + 107 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "The in-context visual prompting tasks aim at understanding and executing specific tasks on new query images by leveraging a pair of task-specific example images and accompanying text instructions. Previous works [105, 18, 52] have explored this capability in the context of diffusion and autoregressive models, demonstrating its potential in enhancing model adaptability. The significance of in-context visual prompting lies in its ability to enable models to generalize to novel tasks. This approach mirrors human-like learning, where new tasks can be understood and performed by observing relevant examples. This capability has broad implications across various domains, and paves the way for more flexible and efficient paradigms capable of adapting to a wide range of specific tasks.", + "bbox": [ + 125, + 114, + 870, + 213 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "We curate four representative tasks to evaluate the performance of GPT-4o in in-context visual prompting. 
These tasks are designed to assess the model's ability to understand and adapt to specific visual tasks based on provided examples and guidance, including:", + "bbox": [ + 125, + 218, + 870, + 261 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Movie-Shot Generation: A three-shot image collected from [42] is provided as an example, and the model is instructed to follow this format to generate similar movie shots for the query image.", + "- Ray-Tracing Rendering: An example gaming scene is provided with and without ray tracing, and the model is expected to render a ray-traced version of the query image.", + "- Overlaid Mask Visualization: The model receives an original image accompanied by its corresponding segmented results from [49] and is tasked with outputting the segmented results in the same format for the query image.", + "- Maze Solving: A maze and its corresponding solution path are provided as examples, and the model is required to draw the solution path for a new maze presented in the query image." + ], + "bbox": [ + 171, + 271, + 866, + 411 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "All the results are illustrated in Figure 43. Compared with Gemini 2.0 Flash [99], GPT-4o demonstrates promising performance in movie-shot generation and ray-tracing rendering tasks, showcasing its ability to follow example formats and generate visually coherent outputs. However, it still struggles with maintaining consistent visual semantics across the generated outputs. For the overlaid mask visualization task, GPT-4o falls short in effectively executing the instructions. The result fails to adhere to the required format, indicating that the model's ability to process and generate complex outputs remains limited. For maze solving, a task that demands advanced visual reasoning and logical inference, GPT-4o struggles significantly. This highlights the challenges in combining higher-level reasoning with visual generation capabilities, suggesting that more sophisticated reasoning mechanisms are needed for tasks of this nature.", + "bbox": [ + 125, + 422, + 870, + 547 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "In summary, GPT-4o shows considerable potential in in-context visual prompting, while it still underperforms in certain difficult tasks. 
These observations suggest that further advancements are necessary to enhance its generation and reasoning capabilities for more complex and diverse visual tasks.", + "bbox": [ + 125, + 553, + 870, + 595 + ], + "page_idx": 55 + }, + { + "type": "page_number", + "text": "56", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "In-Context Visual Prompting", + "text_level": 1, + "bbox": [ + 200, + 112, + 397, + 127 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/751b8b0b723ad8d89ba3583fc6932d850eb7cf8a76f7215ed994d255821fb63e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 192, + 137, + 212, + 154 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Evaluation: Understanding and executing specific tasks with example images.", + "text_level": 1, + "bbox": [ + 215, + 141, + 813, + 157 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/7e5a6dca5061982b8b4f23f693eb84c50a375b77e068c461c42e68104c157985.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 196, + 162, + 276, + 244 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/70e1fc27eedb02031d28c9885667468714afce1b37646dd33a83ef114dce4e78.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 277, + 162, + 439, + 244 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/5f022a9d8700e671b60295a4ae5dd57ff7c7747fa42c0d5b9d6fb09f4fe1b014.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 439, + 164, + 511, + 246 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/49f93c593df590166d47657e6c0eda2ed7ef62cdd094e685de672efaec343681.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 164, + 808, + 246 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Input Text: \"The first image contains three movie shots. Please imitate this image and create the subsequent movie shots for the second image.\"", + "bbox": [ + 194, + 248, + 785, + 276 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 197, + 281, + 274, + 294 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 320, + 281, + 395, + 294 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 454, + 281, + 498, + 292 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 612, + 282, + 710, + 294 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/e63aaf3e7583e329733bdf0d0086b196c266732e65fcc124b54b13a2ae6da734.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 256, + 301, + 705, + 371 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/05f1f17e9903bfc312375e98f666c3fae0cca18b6d2b7c60fe1605e04fefa3d0.jpg", + "image_caption": [ + "Input Image", + "Input Image", + "Gemini 2.0 Flash" + ], + "image_footnote": [], + "bbox": [ + 316, + 390, + 653, + 460 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Input Text: \"The first image includes an original gaming scene, and the scene enhanced with ray tracing. 
Please imitate this image and create the scene enhanced with ray tracing for the second image.\"", + "bbox": [ + 181, + 478, + 815, + 520 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/27c5eea52e6ca0c737ae2cf46a953e11f8a44064e52b132ad803df249ab60e5c.jpg", + "image_caption": [ + "Input Image" + ], + "image_footnote": [], + "bbox": [ + 187, + 525, + 431, + 592 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/0755edc88c21d2311018ccff73e7808905f504f9fd16b4b534f5cee1942b18b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 433, + 525, + 552, + 590 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/2c63f34552d2fef8c8e86f7dd76a62315f4c1dd4bcb26b2198e0e7acd23d6ef7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 557, + 525, + 679, + 592 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/1fb2c646d1d34c3087486d5485c780d38c7e518b41b9210914f00d583ccfd2b1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 525, + 805, + 592 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Input Text: \"The first image shows an original image and its segmented results. Please imitate this image and output the segmented results in the same format for the second image.\"", + "bbox": [ + 181, + 594, + 823, + 623 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/4702bc6682345e1ecd0d870da7f607f3851d574f86f2af27b69b7ce8feb8d7b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 204, + 660, + 276, + 715 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/4cefd3a472618bf4ed1b900b8f0253d8ca072ad60e16e238d601280a1ff4ac20.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 292, + 660, + 361, + 717 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/a05ae86c57f255f29915d6f1ca7fbca481e453d5260a2ee80c1df5f1a84c31e1.jpg", + "image_caption": [ + "Input Image" + ], + "image_footnote": [], + "bbox": [ + 390, + 655, + 475, + 720 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/fe38be0bc78d431b48a8e0538e8cea074d5ce5e421511c2e081ea26e6ad8173c.jpg", + "image_caption": [ + "GPT 40" + ], + "image_footnote": [], + "bbox": [ + 491, + 655, + 573, + 719 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/acc8a88501794d5cb5abb73802e25f714233bc33723c8874cf9c6c27f2a19f7d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 665, + 673, + 717 + ], + "page_idx": 56 + }, + { + "type": "image", + "img_path": "images/2a04299128063c19b3d18cf2c84c9a40f476b3c8ef92b747667e18981dce0475.jpg", + "image_caption": [ + "Gemini 2.0 Flash" + ], + "image_footnote": [], + "bbox": [ + 689, + 665, + 761, + 717 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Input Text: \"The first image displays an unsolved maze and the maze with a solution path in red. 
Please imitate this image and identify the solution path for the second image.\"", + "bbox": [ + 184, + 729, + 812, + 758 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 241, + 765, + 318, + 777 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 388, + 765, + 464, + 777 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 509, + 765, + 552, + 776 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 630, + 765, + 727, + 776 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Figure 43: Task: In-context visual prompting. The goal is to perform specific visual tasks on new query images based on task-specific example images and text instructions. Setup: Four representative tasks are evaluated: movie-shot generation, ray-tracing rendering, overlaid mask visualization, and maze solving. Each row includes example images, query images, and the corresponding outputs. Observations: GPT-4o excels in movie-shot generation and ray-tracing, producing coherent outputs but lacking consistency in visual semantics. It fails with overlaid mask visualization and maze solving, showing limits in complex task integration. While promising for in-context visual prompting, it needs refinement for more complex and reasoning-intensive tasks.", + "bbox": [ + 125, + 795, + 870, + 893 + ], + "page_idx": 56 + }, + { + "type": "page_number", + "text": "57", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "2.3 Image-to-3D Tasks", + "text_level": 1, + "bbox": [ + 127, + 90, + 302, + 104 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "We evaluate GPT-4o's 3D understanding capabilities from 2D images across three tasks: 2D image-to-3D modeling, 2D UV map-to-3D rendering, and novel view synthesis.", + "bbox": [ + 125, + 116, + 870, + 146 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "2.3.1 Image to 3D modeling", + "text_level": 1, + "bbox": [ + 127, + 159, + 336, + 174 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "Generating 3D models from monocular images benefits a wide range of applications, including augmented reality, virtual reality, and the gaming industry. This capability not only facilitates the content creation process but also reduces the reliance on specialized 3D artists for creating 3D assets, making the process more time- and cost-effective. Therefore, there is a growing research interest in generating 3D models from 2D images. Early image-to-3D methods employ learning-based approaches for single-view reconstruction [74, 77, 102, 79]. Recent works leverage diffusion model priors to perform image-conditioned 3D generative modeling [69, 68, 83, 113].", + "bbox": [ + 125, + 183, + 870, + 268 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "In this section, we investigate the potential of GPT-4o for 3D modeling from 2D images. We begin by prompting GPT-4o to generate a Cinema 4D modeling interface to test its ability to produce coherent representations of structure, material, and wireframe based on the input image. As shown in Figure 44, GPT-4o can generate high-quality 3D model renderings within the application interface. Notably, the generated models exhibit clear wireframes and textures consistent with the input images. 
In contrast, Gemini 2.0 Flash and Midjourney v6.1 fail to achieve comparable results under the same conditions, producing inconsistent renderings. We then prompt GPT-4o to generate corresponding 3D object and material files in .obj and .mtl formats to further evaluate its understanding of the underlying structure in the rendered images. However, the output 3D models are coarse and inconsistent with the input images, indicating that although GPT-4o can produce visually coherent 3D renderings, its capability to transform these into accurate and usable 3D object files remains limited. Additionally, Gemini 2.0 Flash and Midjourney v6.1 do not support exporting 3D models.", + "bbox": [ + 125, + 272, + 870, + 426 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "2.3.2 UV Map to 3D rendering", + "text_level": 1, + "bbox": [ + 127, + 439, + 356, + 455 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "UV maps are 2D images that store texture information for 3D models. In 3D modeling, geometric data is represented in 3D space, while texture data is defined in a 2D texture space. UV mapping is the process of projecting a 2D UV map onto a 3D model, accurately aligning texture with geometry. The UV mapping task can therefore be used to evaluate a model's capability for 3D perception and spatial understanding. Moreover, this task has broad applications in design, helping to reduce designers' burden of manually creating product renderings from 2D maps and providing useful references.", + "bbox": [ + 125, + 462, + 869, + 546 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "As shown in Figure 45, GPT-4o exhibits a superior ability to generate consistent 3D renderings from 2D maps compared to Gemini 2.0 Flash and Midjourney v6.1. However, some outputs remain unsatisfactory, displaying inconsistencies in patterns and structure (see row 3 in Figure 45). Gemini 2.0 Flash struggles to correctly wrap the 3D model, though it maintains pattern consistency. Midjourney v6.1 tends to introduce additional, imagined features, which reduce controllability in this task.", + "bbox": [ + 125, + 551, + 869, + 623 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "2.3.3 Novel View Synthesis", + "text_level": 1, + "bbox": [ + 127, + 636, + 330, + 651 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "From a monocular view, humans can imagine an object's 3D shape and appearance, since they have accumulated rich prior knowledge of different objects throughout their daily lives. This ability to infer novel views of objects is essential for a wide range of tasks, from object manipulation to artistic creation such as painting. Early works achieve image-to-3D reconstruction using category-specific priors or large-scale pre-training [45, 80, 87, 32, 131]. Recent studies have shown that large diffusion models contain rich 3D prior information of the visual world, enabling them to perform novel view synthesis [69, 68, 83, 70]. These novel views can then be used for zero-shot 3D reconstruction using different 3D representations such as NeRF [76], mesh, or SDF.", + "bbox": [ + 125, + 660, + 870, + 758 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "In this section, we evaluate the ability of GPT-4o to perform novel view synthesis on objects with artistic styles and asymmetric geometry. As shown in Figure 46, for artistically styled objects, GPT-4o and Gemini 2.0 Flash largely preserve structural consistency with the input image, although they may change some elements or fine details. 
For the asymmetric object, GPT-4o can preserve the object scale and size better than Gemini 2.0 Flash. However, Midjourney v6.1 fails to generate consistent novel views, instead producing visually appealing images that do not align with the given prompt of this task.", + "bbox": [ + 125, + 763, + 870, + 849 + ], + "page_idx": 57 + }, + { + "type": "page_number", + "text": "58", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "Image to 3D Model", + "bbox": [ + 163, + 170, + 318, + 186 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "Evaluation: Shape/texture consistency, wireframe plausibility.", + "text_level": 1, + "bbox": [ + 253, + 202, + 767, + 220 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/1f9b0b011e88c337096f493dbdc16bbafefb87c7ac6d0817b51a02d65ae211a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 226, + 321, + 347 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/20925173299f15c6d10bdcdcb9ec379e65b18d75af2a57d652be5a8ff0b8001d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 333, + 226, + 491, + 347 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/afb58fab9b72f3bf1b78d415b4d5e8ca817660113f2b9a1da7dee7153ca97330.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 226, + 663, + 347 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/1b8601f013b93c27baf25e41c09132b903155d83d0ba6b9bdcd83e2b33d24788.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 226, + 833, + 347 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/7add957f9d7531f536cfef81731bc6b28f75e12995ba9dbfe3e545658a323c7d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 348, + 321, + 469 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/8d55ee618dd53e3a5e154598b93d67afc79a4d7bf139ce03e31d740af22b29f0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 348, + 491, + 469 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/9149d06a0cd845a92028c8f8199da8213692d3c85666f692dd350f78144af753.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 348, + 663, + 469 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/666712e651824dc36d467ea97f632d637319b03414823161f4a04e13a521f767.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 348, + 833, + 469 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/c46b403e7ac2e945512ffa6d309f1e6f20a355598593e3758059233cf042e2e6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 165, + 470, + 321, + 592 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/b15793d17ca7c80bf1b909d279a47bdf5b3a139165326ca3c1dffd011f7b0722.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 470, + 491, + 592 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/875d4fca774409cc984e2951d3b6c99aa12608a7a78632a41fcd84f6bd223082.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 470, + 663, + 590 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/1a0ac4c5dee91e1501338bfcaaf66863752c28b3f6438eb24488b2e78087a942.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 470, + 834, + 590 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": 
"images/f5a97061e0c642e8ad66652a68547da7ce296666b9869a78d2e0233544c9a488.jpg", + "image_caption": [ + "Figure 44: Task: Image-to-3D model rendering. Evaluate the 3D modeling ability given a 2D image. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better 3D model rendering with consistent shape, texture, and plausible wireframe than Gemini 2.0 Flash and Midjourney v6.1." + ], + "image_footnote": [], + "bbox": [ + 163, + 597, + 323, + 719 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/a11e7e5a889c46ee27cc17d0f091d77cb1c167e6c4314649ed219089b6bfe94a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 597, + 493, + 719 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/4b00229ed17a9f68b0777d94b5bc1f08385f38b87d3759265475e7b14d5c6df0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 597, + 665, + 719 + ], + "page_idx": 58 + }, + { + "type": "image", + "img_path": "images/67c90170597a34e84c1311ea2c3f363b473991486ba6bf0a15957d95db598921.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 597, + 833, + 718 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "Input Text: \"Generate a pre-render view of a C4D model, including the UI, wireframe and material.\"", + "bbox": [ + 148, + 729, + 854, + 744 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 189, + 752, + 281, + 767 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 383, + 751, + 436, + 763 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 524, + 751, + 640, + 763 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "Midjourney v6.1", + "bbox": [ + 697, + 750, + 810, + 765 + ], + "page_idx": 58 + }, + { + "type": "page_number", + "text": "59", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "2D UV map to 3D rendering", + "bbox": [ + 161, + 159, + 375, + 176 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/5df66609cbc1920bfd7bfab7731dee320fbf9bd64ef127bfe114205f6384aaa1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 193, + 354, + 210 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Evaluation: Structure/pattern consistency.", + "text_level": 1, + "bbox": [ + 354, + 196, + 689, + 214 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/9410af00d870c0c0323ba5f3b9c30e57a1d3b4116a79dda9d22fbb7f6ef86c7f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 148, + 219, + 352, + 339 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/0673e0b100dc90c3154a0f84f46af572d02b7c0ce4f3ef16fade43705be3a169.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 359, + 219, + 521, + 340 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/39a867d5c76eb0ae5b81b696fa69ab0660b5b5250864e6b56ff6616c1d01cf18.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 527, + 219, + 683, + 340 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/f00108f1137a655c41931a3eca3d59fb3d37c83f487aa21775f7ef258746a7e5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 219, + 846, + 340 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": 
"images/c720d7639945658d54f942ed378b4e165f730b8de7a268e21417078ae525472e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 148, + 343, + 349, + 465 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/50e1bcc4270a236adf25775994fc41829c4d6c762fc5cef6ca4d34dd425a43e5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 343, + 521, + 465 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/34545e79e79f3078a365a6cb3d938519fac2d3ed1ebd3db57368a178779167ae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 527, + 343, + 684, + 465 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/e914339e66bb3680ffe4e499cab448aa16a263e1ab23c54fac562df95422444c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 691, + 345, + 844, + 467 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/028da69b538ec977850baf21c5611c81d88bf976beeeeeccb70474beba17944b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 150, + 467, + 352, + 598 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/915474b5475b9cfb5be03f5b44afa9a58d8047bd5530980a2d330a11b5d8294c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 468, + 521, + 598 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/391fb13f1c421b143115f7495cad2522d8d46310cd74bbe3fd2d5b33986486e1.jpg", + "image_caption": [ + "Figure 45: Task: 2D UV map to 3D rendering. Evaluate the 3D perception and spatial understanding ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better 3D renderings based on 2D maps than Gemini 2.0 Flash and Midjourney v6.1. However, structure and pattern inconsistencies still exist among these three models." 
+ ], + "image_footnote": [], + "bbox": [ + 527, + 469, + 684, + 598 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/54e8f1e07b35c7646e6a1992f478ea6768d117a77bc4a4fc4afcd51a7f5a0920.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 469, + 846, + 598 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/3aca5dc86dd3946c68be0172061baa23975fed2075ecacb889e265f89089a1e4.jpg", + "image_caption": [ + "Input Text: \"Assemble this packaging cutout into a complete product and output a 3D rendered image.\"" + ], + "image_footnote": [], + "bbox": [ + 148, + 604, + 354, + 723 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/0a94fc94c0714aa10ea27a9a7909f7e8481229af8d64048c14d36241ab29679f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 603, + 521, + 724 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/302a3887daa57a9516027cfc6473b7293f8f9c6bd1007fa32f38d00d07543191.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 527, + 603, + 684, + 724 + ], + "page_idx": 59 + }, + { + "type": "image", + "img_path": "images/887ddccf2929d6f4372e9362922daad36a1526e449412d7bfea8a7165bea64b9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 603, + 846, + 724 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 197, + 763, + 289, + 777 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 393, + 762, + 447, + 775 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 537, + 762, + 655, + 776 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Midjourney v6.1", + "bbox": [ + 702, + 762, + 816, + 777 + ], + "page_idx": 59 + }, + { + "type": "page_number", + "text": "60", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Novel View Synthesis", + "text_level": 1, + "bbox": [ + 184, + 193, + 354, + 210 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Evaluation: Consistency.", + "text_level": 1, + "bbox": [ + 395, + 224, + 611, + 244 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/2fc6af7cdb8c9f7e71021c537aa0aba9a3610880f2762857e8317da88256e3c0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 166, + 250, + 254, + 364 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/e502bbf30709fbc0fd7fe24f3e4886279e719fcc412c016e104a0028ed894e18.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 261, + 250, + 455, + 364 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/a8a92b2a237654ea8a1e5c6eca6a9384ebacc2f436fc6dc6a34e87dd215b9d99.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 459, + 251, + 643, + 364 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/763c96045715d809c9e422f9aa615105eac65d7140529630622d8010d75de173.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 648, + 250, + 846, + 364 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/3dbfbe5de303b95e93930efc7c11ef12558704eeaae8642a301b7dbc59420786.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 166, + 368, + 254, + 478 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/92052199d2635356816ee2aba2aed1f8dca8d83b493fd47666fe5985bfd49a89.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 263, + 369, + 455, + 478 + ], 
+ "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/d2dc794b351faf6fef98c864ed3e399cf079c26ef90eadbb34e0c6fc3e4be78c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 457, + 369, + 643, + 481 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/4ae43763ef324638250f7125268cf0a0cd1fadff5496f7eae1707c4aaad3ee1c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 650, + 369, + 844, + 481 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/97cb4999fb7546f2acbbc518825a138f6e835d048cf927abe4812e6a75004c0d.jpg", + "image_caption": [ + "Figure 46: Task: Novel view synthesis. Evaluate the 3D perception and spatial understanding ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better style and structure-consistent novel views for both artistic painting and asymmetric objects." + ], + "image_footnote": [], + "bbox": [ + 166, + 482, + 254, + 583 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/d895489817a6f21c887841484b153778383ad7ed2f565fe9eb3bf5a334aa848c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 263, + 482, + 455, + 583 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/7f93176c11f8bb6fd47383525af946efe1f62c62e955b8c39ee12e4183f7af70.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 457, + 483, + 643, + 584 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/2ac37d8f19a0434436d70e6adb1d281a20d590ab3f0d0d7a7fa32893d0767278.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 648, + 483, + 844, + 585 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/1c5838ea94c22768e1999617b5ceda45f55d61e50d2423844f55c0de5fe7e177.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 166, + 588, + 254, + 688 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/06dfd66ade1356c7cd3d3f8e966e4e534aad24c5c887a40d0ac06dfd803d92ab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 263, + 587, + 455, + 688 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/89c711b51bd739f9ec7fcc44d62889f885c5ec915e0fc0cda6d670f5ca049e58.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 457, + 585, + 643, + 690 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/5bd9acccaac95cfe7fe77ac7db21eb318d54092425bd5f07277c03e7499c903f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 648, + 588, + 844, + 689 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Input Text: \"Generate three views of this picture.\"", + "bbox": [ + 308, + 696, + 674, + 713 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 176, + 718, + 267, + 734 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 349, + 720, + 403, + 733 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 503, + 719, + 619, + 733 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Midjourney v6.1", + "bbox": [ + 684, + 718, + 797, + 734 + ], + "page_idx": 60 + }, + { + "type": "page_number", + "text": "61", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "2.4 Image-to-X Tasks", + "text_level": 1, + "bbox": [ + 127, + 90, + 292, + 104 + ], + "page_idx": 61 + }, + { + 
"type": "text", + "text": "In this section, we further evaluate both GPT-4o and Gemini 2.0 Flash for several dense image understanding tasks, including segmentation-related tasks, depth estimation, normal estimation, matting, salient object detection, edge detection, layout detection, text detection, and object tracking.", + "bbox": [ + 125, + 116, + 870, + 159 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "2.4.1 Image Segmentation", + "text_level": 1, + "bbox": [ + 127, + 172, + 323, + 186 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Image segmentation tasks group pixels of the given image or video into semantic regions. It is a fundamental problem in computer vision and involves numerous real-world applications, such as robotics, automated surveillance, and image/video editing. With the development of recent deep learning methods, this domain has achieved rapid progress. Early works mainly adopt CNN-based methods with large kernels or respective fields. Recently, transformer-based methods have also worked well and surpassed previous CNN-based methods on various benchmarks. In particular, we test three segmentation tasks, including referring segmentation, semantic segmentation, and panoptic segmentation.", + "bbox": [ + 125, + 196, + 870, + 294 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Referring Segmentation. This task outputs the corresponding mask according to the input texts, and the goal is to test the pixel-level grounding ability of the model. In Figure 47, we compare GPT-4o, Gemini 2.0 Flash and recent state-of-the-art method, Sa2VA [117] (8B model $\\dagger$ ). We show five open-world test cases. For the first two cases, GPT-4o shows the coarse localization ability on the background region. For example, it can mark the grass region despite the unfavorable boundaries. However, compared to the SOTA method, Sa2VA, GPT-4o mistakenly merges both large regions. In the third row, both GPT-4o and Gemini 2.0 Flash cannot perform grounding with complex text inputs. In the fourth row, all models perform badly. GPT-4o generates an unseen chair in the images while Gemini 2.0 Flash performs image editing functions by replacing the smallest chair with a normal chair. Sa2VA also segments the wrong object (the nearest chair). In the last example, GPT-4o also cannot segment smaller objects (\"bag\"). For all examples, both GPT-4o and Gemini 2.0 Flash modify the image contents. These examples indicate that GPT-4o has weak pixel grounding ability.", + "bbox": [ + 125, + 300, + 869, + 454 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Semantic Segmentation. Semantic segmentation assigns each pixel a semantic label, which is one basic vision task. In Figure 48, we show several test cases on the semantic segmentation task. In particular, we adopt Deeplab-V3+ [14] (ResNet101 as backbone, trained on Pascal-Context) as one expert model for reference. Surprisingly, the mask quality of GPT-4o is good on four examples, even comparable with an expert model, Deeplab-V3+. During the testing, we find the texts may be randomly appended to the masks. This is why the first row differs from the remaining examples. For the second and third examples, GPT-4o misaligns the text and mask regions. Compared to Gemini 2.0 Flash, GPT-4o has a much stronger ability in semantic segmentation, particularly for mask shape. 
However, there is still much room for improvement on this task, including a unified semantic segmentation output format, better text-mask alignment, and more accurate mask labels.", + "bbox": [ + 125, + 458, + 870, + 583 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Panoptic Segmentation. This task assigns a semantic label to each foreground region and one mask label plus one instance ID to each instance; it is a unified task format covering both semantic segmentation and instance segmentation. In Figure 49, we compare the panoptic segmentation ability of GPT-4o, Gemini 2.0 Flash, and one expert model, K-Net [123] (trained on the COCO panoptic segmentation dataset, with ResNet50 as backbone). Overall, the mask shapes of GPT-4o are good, and the model can understand the panoptic segmentation task, while Gemini 2.0 Flash fails on the first and third cases. However, the spatial locations are changed in all cases. The generated masks follow part-whole formats and are even finer-grained than those of K-Net: in the first example, the player's jersey number (17) and the people's hair are also marked. Meanwhile, we find the same inconsistency as before: some outputs contain text while others do not, even though they use the same text prompt. In addition, GPT-4o can distinguish different instances with different colors, although most of the instance colorings are not good (see the last example).", + "bbox": [ + 125, + 589, + 870, + 742 + ], + "page_idx": 61 + }, + { + "type": "page_footnote", + "text": "$\dagger$ https://huggingface.co/ByteDance/Sa2VA-8B", + "bbox": [ + 151, + 896, + 426, + 911 + ], + "page_idx": 61 + }, + { + "type": "page_number", + "text": "62", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Image-to-X", + "bbox": [ + 212, + 101, + 303, + 116 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Evaluation: Referring Expression Segmentation, Grounding and Grouping.", + "bbox": [ + 230, + 130, + 785, + 147 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/dbd74c382963ca8096f3df2cbf8c15b1dc9bb0dcf7b14f2691ca5f3969de4ae6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 151, + 326, + 261 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/0602a903854b76208c57b28e4349dae82df8b2aa2e65f275df6b6940b8ec8061.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 343, + 151, + 488, + 260 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/03c4e2f2110cca390cec16e188035acf44295367b91027a771e4e53dedaf79e1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 151, + 651, + 260 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/452eb4a0a98c3bffd9b5ca2149205d5cd34827599c8caa097cbd6d949a07b14c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 151, + 816, + 260 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Input Text: \"Please segment the grass in the image and directly generate the output image.\"", + "bbox": [ + 181, + 267, + 802, + 281 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/8f284b6c611448bfc951157e9a8d4ed20c6128da83b6852e318506b931aab1de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 285, + 328, + 398 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/f99ed011b160f34447b7db945e73c93bb19c1fb9a7888322a3f61e89fa0b9b59.jpg", + "image_caption": [], + "image_footnote":
[], + "bbox": [ + 344, + 285, + 490, + 398 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/c5ff8325e12ed6030f6fd0a88c40d2600c49d7423ec72c29d20775bcecc460a1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 285, + 656, + 398 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/e331257e3a002ea35aae9b3560e329ae51aea6ec05a4a8f8fc4c533cfe981c94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 286, + 816, + 398 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Input Text: \"Please segment the sand in the image and directly generate the output image.\"", + "bbox": [ + 181, + 402, + 792, + 417 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/f2338a3386d7dafb1d4ed2f0d097545a66aaae303a0ec9c364235ccd56f6fb11.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 422, + 326, + 536 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/94ec2cb93bf6619e7cff5d27b701fd16348dc2330513de806c10891f384b214e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 422, + 490, + 536 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/bc7f355d6226e9ae8b3bf01b1ec603dc9a91ef5a37b5df843f5fa2d53f805b40.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 422, + 656, + 536 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/54845239cccd77ffed346bbbb01a2390dc01dd6649c038184a108f0d78f6e18b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 422, + 816, + 536 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Input Text: \"Please segment the table beside the black sofa in the image and directly generate the output image.\"", + "bbox": [ + 181, + 539, + 821, + 566 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/40eaf0cc2887a61189056995988a25e411abbb492721dd306939eb435cb2c205.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 570, + 326, + 683 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/fd5210b3394235c1477fe979838a108f3de32982a0527224a4d83dc207ae62bc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 570, + 485, + 683 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/900798659953d6bf5da0b4a07b8d732ff649b11f079acb0d0b7f9e3317082e6e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 570, + 653, + 683 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/44a28c987decc43c2c7016a554b05881a1fabeb9cb87f073992406fc3bf4eeb3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 570, + 816, + 683 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Input Text: \"Please segment the smallest chair and directly generate the output image.\"", + "bbox": [ + 187, + 686, + 777, + 700 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/7be4bdc21bc3e0a0da4304091bcaaf819d4c752aa483ba4d25c1add751eaadcc.jpg", + "image_caption": [ + "Figure 47: Task: Image to X: Referring expression segmentation. Evaluate the grounding and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Sa2VA [117]. Observation: These examples indicate that current GPT-4o has weak pixel-level grounding ability." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 705, + 326, + 816 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/ddccd12b79d35e83a67c0ec67ebca9d6ffdf81f90dbdc128897666c54e5ca249.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 705, + 485, + 816 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/82c794a5d723f0b36ec6540fc27ae81b30d682d013d9335993d95bcec71b79ae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 705, + 656, + 816 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/cf3e7ef91003317002f53f486a244b5640ca63c14670c2003aa978121ccb908c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 705, + 816, + 816 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Input Text: \"Please segment the bag in the image and directly generate the output image.\"", + "bbox": [ + 189, + 819, + 792, + 833 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 210, + 842, + 295, + 854 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 393, + 842, + 442, + 854 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 526, + 842, + 637, + 854 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Sa2VA", + "bbox": [ + 720, + 842, + 769, + 854 + ], + "page_idx": 62 + }, + { + "type": "page_number", + "text": "63", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 62 + }, + { + "type": "image", + "img_path": "images/a71db3675a1fc3707882b1ea7573faaee81a29d969a48f7917d231e5c96013a7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 256, + 176, + 277, + 194 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Evaluation: Semantic Segmentation, Shape and Grouping.", + "text_level": 1, + "bbox": [ + 279, + 181, + 738, + 196 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/dfa857e58ea3978f96a70b80835f05da2c3c164ea663a535310bee9d91124824.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 161, + 199, + 313, + 316 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/bffe05ee07a77a1df7cf6cc2b2d3a0cb232aa99efd565f3002c170b0ea6c49e3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 198, + 488, + 315 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/1fc3e7193935e21d941ba2a69b2a1621e26533675595f4e66648793cc5c521ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 198, + 653, + 315 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/89990c180c4a599a243294b0904684d0351ed3529a4375d02a3ffb1ea5cc5012.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 198, + 828, + 315 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the semantic segmentation result of the image.\"", + "bbox": [ + 163, + 319, + 715, + 335 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/fbc284435daba3dd588193455507d1744f38c42e568b7de9d51aacf24568bbd6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 339, + 313, + 454 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/6a5d14299085340af2af8df55c7d78da1c76cf94e7c40072a4aa894710067730.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 339, + 488, + 454 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": 
"images/70b3977d31710e5b25126e9195e5448eee93a4c761b993c52e1db1cb26c59702.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 339, + 656, + 454 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/835c092bc8f3a790fdd3ce19ea243873bcd1e8c977f1e3b28cb6589fd8a4f735.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 339, + 828, + 455 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the semantic segmentation result of the image.\"", + "bbox": [ + 163, + 460, + 712, + 477 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/0759fb223e415e0f19710f612a3c80b614bae80774a18396353bf7b14eb7da5d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 484, + 313, + 599 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/9915d6c0aa3e038b5ec03ebeb8dcca7a1b7496741251e2af39361e947240d627.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 484, + 488, + 599 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/07315caab9c8a97608642744aa18c2e35c6920d402732aa3a2675180b0a7af1e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 484, + 656, + 599 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/4bdca3b7ab2d59c6d6c1458affcfac976bc7751cd6959565ea93ddf768f48481.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 484, + 828, + 599 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the semantic segmentation result of the image.\"", + "bbox": [ + 163, + 604, + 712, + 619 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/1b277bbf258a90079c84ec982452f71ac0f350730f9aea170064b358fe0eb923.jpg", + "image_caption": [ + "Figure 48: Task: Image to X: Semantic segmentation. Evaluate the shape and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Deeplab-V3+ [14]. Observation: Compared with Gemin-2.0, the mask quality of GPT-4o is good. However, there are still huge gaps in the standard semantic segmentation format." 
+ ], + "image_footnote": [], + "bbox": [ + 163, + 628, + 313, + 743 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/a6a106419b52434d8a55c2b5fb821d02c748ecc0597904cf146832238e26aadd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 628, + 488, + 743 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/fd3f62eae10787f13ed9ce858671690809ab040be8b604ced0e3044aef5bc8d9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 628, + 656, + 743 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/aa66b3606a91f980129cef7b09a646b9b32aae5937a1a800da9a62b68a314796.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 628, + 828, + 743 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the semantic segmentation result of the image.\"", + "bbox": [ + 171, + 744, + 720, + 761 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 186, + 765, + 277, + 781 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 377, + 765, + 431, + 777 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 519, + 765, + 635, + 777 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Deeplab-V3+", + "bbox": [ + 702, + 765, + 792, + 780 + ], + "page_idx": 63 + }, + { + "type": "header", + "text": "Image-to-X", + "bbox": [ + 202, + 148, + 299, + 165 + ], + "page_idx": 63 + }, + { + "type": "page_number", + "text": "64", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 63 + }, + { + "type": "image", + "img_path": "images/9fe5199286f0112238a7073ea1794d37f5662a85af0714537b15349dcc732e6c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 271, + 125, + 290, + 143 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Evaluation: Panoptic Segmentation, Grouping and Shape.", + "text_level": 1, + "bbox": [ + 292, + 128, + 725, + 145 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/dfdea49947535ad053094764e4f1c4f57b87c66ef48041b87f0c1403e8ebfb06.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 151, + 326, + 263 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/0d5ac0ba593b14c0ff43464e9d8cb2a78e3c44bedcbbb29c36a51144ad9c8c34.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 343, + 150, + 490, + 263 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/ae911cf1c44c913e1579fdc6ee25f6acd40fabf3b6bb39cb4fd1df2fd7c49995.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 151, + 653, + 263 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/1b3602b90061d565f84e84ed00f2de84147c20f674c0bc022d3fa5b9bd5926a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 151, + 816, + 263 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the panoptic segmentation result of the image.\"", + "bbox": [ + 179, + 267, + 697, + 281 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/9164a12913f12d50e14960da007a5f198f424495fc1f7b5da62b254cd8bd48e1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 285, + 326, + 398 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/3745e5cc44c159dbc30c05e6abfee39434906086aa082386ab669e6a3800719e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 
285, + 490, + 398 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/600cb4b1adf7ff450977eb1ff9c1356ca0f2202a614d5297fdfa99e34eb010fc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 285, + 653, + 398 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/1e96cf20eb72b581b66ee41d6ac4ce09a54c1964332e8249d136a88fe87afae1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 285, + 816, + 398 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the panoptic segmentation result of the image.\"", + "bbox": [ + 179, + 401, + 697, + 416 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/bba2b69130e87b6f2573ea275d85a084e8a7be0d184cc6aba44beed927746b1f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 422, + 326, + 536 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/98a34d23216e8ad84b654750599c6cf856ae93a257ecd204feb4347ff25f2411.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 422, + 488, + 535 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/e19a526ae728ef205a6d09c27b250ea5bcb7433bc30ff0fe90075b9fa80b7e50.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 422, + 656, + 536 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/36a0b0bce8ab290695f007ef9c9240f85222009951f43efd16512c909c48a75e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 422, + 813, + 535 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the panoptic segmentation result of the image.\"", + "bbox": [ + 179, + 537, + 697, + 553 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/a3692ab260876169e43b10f0a13b3e0a2358a469179695d6391b7121a291f656.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 559, + 326, + 675 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/04d61817b36efe3075ecd5a5ab06c82b578abc68918b2566f24ba1ac9e539e1b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 559, + 488, + 674 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/76af15fc690cc0e7fcb7cc802c4e12a650c1bda8daa750afc68a7050560ab699.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 560, + 655, + 675 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/f550448a4b54f7ddaffc6aa7b44e8f05342e71009989a87f527ce649b3de480a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 561, + 813, + 675 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the panoptic segmentation result of the image.\"", + "bbox": [ + 187, + 676, + 705, + 691 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/7ea2069c70699c15434b740761c5c8214d80002c7982c766b3b5d6af3f7b2f9d.jpg", + "image_caption": [ + "Figure 49: Task: Image to X: Panoptic segmentation. Evaluate the shape and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and K-Net [123]. Observation: GPT-4o can understand the panoptic segmentation task, while Gemini 2.0 Flash cannot do this task in the first and third cases." 
+ ], + "image_footnote": [], + "bbox": [ + 179, + 696, + 326, + 806 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/7a80ead376e4c816f64243eb12d3db5695241cc5988afc361964743b675d9e97.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 695, + 488, + 806 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/d1b34b2ffc2bfa4c9301c7d1c2ce46deef595c8d9d33f80db8a4a3538f6a89c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 696, + 655, + 806 + ], + "page_idx": 64 + }, + { + "type": "image", + "img_path": "images/2e8e24d1cba72b5e1d3fa71c6329231f5130c1a2c84e08710a2bfcef26d50f3e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 671, + 696, + 813, + 806 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the panoptic segmentation result of the image.\"", + "bbox": [ + 187, + 809, + 705, + 824 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 210, + 832, + 295, + 845 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 393, + 832, + 442, + 844 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 526, + 832, + 637, + 844 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "K-Net", + "bbox": [ + 723, + 832, + 767, + 844 + ], + "page_idx": 64 + }, + { + "type": "header", + "text": "Image-to-X", + "bbox": [ + 210, + 99, + 303, + 114 + ], + "page_idx": 64 + }, + { + "type": "page_number", + "text": "65", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "2.4.2 Edge Detection", + "text_level": 1, + "bbox": [ + 127, + 90, + 287, + 104 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "Edge Detection. As a classic vision task, edge detection aims to identify the boundaries or edges of objects within an image. These edges represent the locations with significant changes in image intensity, color, or other visual features. Common edge detection operators include the Sobel, Prewitt, and Canny operators. Recent works adopt deep learning-based approaches.", + "bbox": [ + 125, + 114, + 870, + 172 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "In Figure 50, we compare this ability with a recent SOTA deep learning based approach, EMDB [56]. For four examples, we find both GPT-4o and Gemini 2.0 Flash can detect object edges for both foreground and background objects. In addition, the details are even good using GPT-4o. We find two critical issues: 1) The spatial localization of GPT-4o is changed as observed by the segmentation tasks. 2) The content of GPT-4o is also changed. For example, in the first example, the road is generated, which does not exist in the input image.", + "bbox": [ + 125, + 176, + 870, + 247 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "Image Matting. Image matting is a technique in image processing that aims to separate a foreground object from its background and obtain a detailed alpha matte, which indicates the transparency or opacity of each pixel in the foreground. It goes beyond simple segmentation by providing more precise information about the boundaries and fine details of the object, especially for complex objects like hair or smoke.", + "bbox": [ + 125, + 252, + 870, + 309 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "In Figure 51, we show three testing examples, with one expert model, Matting Anything [53]. 
Compared with Gemini, GPT-4o can handle the simple cases, as shown in the third row, so it understands the task goal; it can even keep the fine-grained details of the horse's hair. However, considering the strict requirements of image matting (fine-grained and well-aligned details), the overall quality is poor. Compared with Matting Anything, both GPT-4o and Gemini work poorly, and we find nearly the same issues as before: 1) wrong spatial localization, and 2) changed contents.", + "bbox": [ + 125, + 314, + 870, + 398 + ], + "page_idx": 65 + }, + { + "type": "page_number", + "text": "66", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "Image-to-X", + "text_level": 1, + "bbox": [ + 196, + 222, + 292, + 239 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Evaluation: Edge Detection, Shape Analysis.", + "text_level": 1, + "bbox": [ + 334, + 252, + 684, + 270 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/0cfb348c56fe1101617d57441e326b36eadf434565b6c1880650127baaf8224e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 276, + 326, + 369 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/7bf3e8e8fcd88c5d02dc116f91c2aa8891be20f22eda6960e71e5c24543006c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 326, + 277, + 480, + 369 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/d8de1fb96772ae2a9a2c7fa60dfee2b9c9838cdd7633d066debcab1d110abac9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 480, + 277, + 663, + 369 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/c57774706ddf76b7eafe1bc39859e9e41d2e9be8a2ed40d9b69c030d906ac930.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 276, + 843, + 369 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Input Text: \"Please detect the edge of object in this image and output the final image.\"", + "text_level": 1, + "bbox": [ + 161, + 380, + 782, + 397 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/35081435a9dddead08f2e1591ed883e214e7ed45a52ef1bb2df34789a8f65f50.jpg", + "image_caption": [ + "Figure 50: Task: Image to X: Edge detection. Evaluate the shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and EDMB [56]. Observation: Both GPT-4o and Gemini 2.0 Flash can detect object edges for both foreground and background objects."
+ ], + "image_footnote": [], + "bbox": [ + 143, + 407, + 328, + 503 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/61a9787b52f106410ced18bbf723e46ef2d3db614a1b9796dd588397ad04206e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 328, + 407, + 460, + 503 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/a0ebad62eca5fc477e0815751adacf1e46b3abcdce6c306101ecb96b7043a8c4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 460, + 407, + 648, + 505 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/425b5ab97efea03f5a9f305b7eb62c794a077f423e347f1fbc1c194faca7ed85.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 653, + 407, + 844, + 506 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Input Text: \"Please detect the edge of object in this image and output the final image.\"", + "text_level": 1, + "bbox": [ + 161, + 508, + 782, + 523 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/63d21e13c3a1a35a1fd1c9cedc021755050cc1398455173eaf4da3af031229aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 143, + 525, + 316, + 659 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/1e393f9e4a534fd74efa40049b2b251a874ba84af7a4ec4b9bafb559308b436f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 316, + 523, + 488, + 659 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/7af8979fc8ec37fdc42425294aa893ae45a15b13d8806f2f8c4eac75eae9952b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 491, + 525, + 663, + 659 + ], + "page_idx": 66 + }, + { + "type": "image", + "img_path": "images/b41cebcb69ab1488c833010b4ba4c0e40ecd6d2c22ed7583995ccecfc50154af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 525, + 838, + 659 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Input Text: \"Please detect the edge of object in this image and output the final image.\"", + "text_level": 1, + "bbox": [ + 160, + 666, + 782, + 683 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 184, + 696, + 276, + 713 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 377, + 696, + 431, + 710 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 517, + 696, + 635, + 710 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "EDMB", + "bbox": [ + 727, + 696, + 772, + 710 + ], + "page_idx": 66 + }, + { + "type": "page_number", + "text": "67", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Image-to-X", + "bbox": [ + 196, + 188, + 292, + 205 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/7ee4ab20360492c6594cd85f6e0806bdd6ac326a6b9f951620d2d3e338cdc66e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 292, + 215, + 313, + 232 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Evaluation: Image Matting, Grouping and Shape.", + "bbox": [ + 313, + 218, + 704, + 234 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/6e8e2c78c4556e12bb41898d5bd695dbc241b0124a7f8964bc5853e2c5f14dcc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 165, + 243, + 320, + 362 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/d4c547c6a3d707292ee701dab81a3f08897230cfc304d056c77c1b62ede0a10e.jpg", + "image_caption": [], + "image_footnote": 
[], + "bbox": [ + 334, + 243, + 491, + 361 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/4bad73d42a2bdd07f8a75924ab1f205dab5f3e1919f244ff0bab551eb3c0c55d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 243, + 661, + 361 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/38e332ce3a2562225f948c109c071db065296fc2fbfae678e295cb014e1ab37d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 243, + 834, + 361 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Input Text: \"Please Please matting the foreground and remove the background. Please directly generate the output image.\"", + "bbox": [ + 161, + 364, + 830, + 393 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/f36d385ab421fbf6d5de21b9a66c5a418a213e03c378e1602b8431ffa1b78dec.jpg", + "image_caption": [ + "Figure 51: Task: Image to X: Image matting. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Matting Anything [54]. Observation: Compared with Gemini, GPT-4o can handle the simple cases, as shown in the third row. However, considering the strict requirements of image matting (fine-grained and aligned details), the overall quality is bad." + ], + "image_footnote": [], + "bbox": [ + 165, + 397, + 320, + 517 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/288f52547eff2bdfad27eac3648ba1b7a88af6beceffdce0913c9495832f57fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 397, + 478, + 516 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/301c1384224e00b3fa898587c4f5578363c3b8189cb4bd2ba2d363bd9d31a9c8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 397, + 663, + 516 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/b95a63199f82c29f9f6a1d717b00f28bde57c3fa11bf2de4c55eab61642dcf7e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 397, + 833, + 516 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Input Text: \"Please Please matting the foreground and remove the background. Please directly generate the output image.\"", + "bbox": [ + 163, + 517, + 831, + 545 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/e6a106a5ad2b6d2b4ff973ae0d66eb902d883da385dea0f75e0dbef3aaddd4b0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 165, + 549, + 316, + 669 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/81b21df800b94c90849050cd7c2fb977ff2ab50081bab2870fe2b7f18fd3d602.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 556, + 500, + 659 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/6af19de892cfb5e57fdbd96c732a8b33020d3b991fab34b219b07d9804454a8f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 549, + 658, + 667 + ], + "page_idx": 67 + }, + { + "type": "image", + "img_path": "images/851db8a91140cd33caf2d0e7e2e00637b208f9f9b4690e97f12ffd0fbb726feb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 550, + 833, + 669 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Input Text: \"Please Please matting the foreground and remove the background. 
Please directly generate the output image.\"", + "bbox": [ + 163, + 670, + 830, + 699 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 194, + 703, + 285, + 719 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 387, + 703, + 439, + 715 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 527, + 703, + 643, + 715 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Matting Anything", + "bbox": [ + 727, + 695, + 789, + 724 + ], + "page_idx": 67 + }, + { + "type": "page_number", + "text": "68", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "2.4.3 Salient Object", + "text_level": 1, + "bbox": [ + 127, + 90, + 282, + 106 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "Salient Object Detection. Salient object detection is a crucial technique in computer vision and image processing. It aims to identify and locate the most visually prominent objects within an image or a video sequence.", + "bbox": [ + 125, + 114, + 870, + 143 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "In Figure 52, we adopt one expert model, BiRefNet [127], as a reference. In all examples, GPT-4o can detect the relevant salient objects given the text prompts, while Gemini 2.0 Flash cannot. The second example shows that GPT-4o can generate well-aligned salient masks. However, for the other examples, the spatial locations are not aligned; the results seem to be generated according to the input image and plausible object classes. In the last example, GPT-4o cannot generate multiple salient object masks, which is a further limitation when dealing with multiple objects.", + "bbox": [ + 125, + 148, + 867, + 233 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "Mirror Detection. Mirror detection is a task in computer vision that focuses on identifying mirror surfaces within an image or a scene. Previous works explore this direction by adopting visual and geometric cues.", + "bbox": [ + 125, + 239, + 867, + 268 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "In Figure 53, we also explore this ability for both GPT-4o and Gemini 2.0 Flash. For comparison, we adopt a recent SOTA expert model, VMD [107]. For simple cases, we find that GPT-4o can carry out mirror detection, as shown in the first example. For the complex scene, it does not work as well as the expert model, VMD: as shown in the second example, it generates a fake mirror and produces a wrong image output, with a line marking the boundary of the fake mirror. As shown in the last row, GPT-4o treats several rectangular objects as mirrors, leading to several false positives.", + "bbox": [ + 125, + 273, + 867, + 357 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "Shadow Detection. Shadow detection is a significant process in computer vision and image processing that aims to identify and localize shadow regions in an image or a video. This technique is crucial, as shadows can otherwise disrupt object detection, recognition, and scene analysis.", + "bbox": [ + 125, + 363, + 867, + 405 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "In Figure 54, we test this ability for GPT-4o and Gemini 2.0 Flash, adopting the SOTA model, SDDNet [21], for reference. For the simple examples (a single object or no objects in the image), both GPT-4o and Gemini can localize the shadow, as shown in the first two rows.
For more complex examples, both models detect the objects together with their shadows in a single output mask, as shown in the last two rows; thus, GPT-4o cannot handle these inputs. In addition, spatial misalignment also occurs in all cases.", + "bbox": [ + 125, + 411, + 867, + 481 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "Camouflage Object Detection. Camouflage object detection is a challenging task in computer vision. It aims to identify objects that are designed to blend into their backgrounds, making them difficult for human eyes or traditional detection methods to distinguish. It has wide applications in the military, security, and wildlife conservation.", + "bbox": [ + 125, + 487, + 867, + 542 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "As shown in Figure 55, we also include one expert model, BiRefNet [127], for reference. Both GPT-4o and Gemini 2.0 Flash can detect and segment the camouflaged animals in the simple cases, as shown in the last two rows. GPT-4o can also detect the specific object given in the text prompt, as shown in the first row. However, the same misalignment issues still exist. In addition, GPT-4o mixes segmentation map styles (binary masks versus color masks), as shown in the last row.", + "bbox": [ + 125, + 547, + 867, + 619 + ], + "page_idx": 68 + }, + { + "type": "page_number", + "text": "69", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "Image-to-X", + "bbox": [ + 200, + 103, + 297, + 119 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/35b326fb2148865594e763630192efc498f76114d6db2844fc81e1f5810bf287.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 254, + 133, + 272, + 151 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Evaluation: Salient Object Detection, Grouping and Shape.", + "bbox": [ + 277, + 138, + 741, + 155 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/2632b121866020ffde6bdceee821e4d81430159476d96e0e01139dc9d082ba70.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 155, + 162, + 325, + 260 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/2cef513138676cfd0753561a3373c26db0d5be094be17da19006ce2882bdbd13.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 330, + 162, + 498, + 260 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/f1f8095ce03c55845db1331630f40c1812ffda931409ba5f89385376c17bfa67.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 162, + 674, + 261 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/7ba0cc1081bd824ad107543a5dbe34527a4105ea1c1e9c55a5359263cccada3f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 162, + 849, + 261 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Input Text: \"Give me the segmentation map of the most salient objects in this image.
Return resulting image by using image generation.\"", + "bbox": [ + 163, + 263, + 820, + 292 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/6076be77d027d41534f7c4a4fd02e93499c2b66c1cccf37db5b69d7f522c9268.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 156, + 292, + 323, + 396 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/17e080b72e8be225cacc760b2d20b9fc982e4786f8527d6dc2ca0c3cbfbee8d2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 330, + 292, + 496, + 396 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/8f405db10bccda82cc90e5b0887cf8f372d62c3308cf2e2f187d9b3c87f37919.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 292, + 673, + 396 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/a34351a4399d5f36a740b505bcdf6d1791a25970e7e86a6c65bfafedbf3d931e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 292, + 849, + 396 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Input Text: \"Give me the segmentation map of the most salient objects in this image. Return resulting image by using image generation.\"", + "bbox": [ + 163, + 398, + 821, + 426 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/8619a8168756037dbe0d396ebd1fa868b9e5eebb02bfe0bc848ba11f025e59c5.jpg", + "image_caption": [ + "Figure 52: Task: Image to X: Salient object detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and BiRefNet [127]. Observation: For all examples, compared with Gemini, GPT-4o can detect related salient objects with the text prompts while Gemini can not achieve this function." + ], + "image_footnote": [], + "bbox": [ + 156, + 428, + 321, + 513 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/11fb36737f5588598b2c288b162d49548d2524ca0fb7dcafb49bbd7f1ef163a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 428, + 496, + 513 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/8fcfecbfee8e3af8ee12f1909d0f8d6442457efaad84be98a5ae90af97d909d1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 428, + 673, + 513 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/689143eb07d759080356a1acc014ea1f91236ddde377299fa31b78f9f34ed9bc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 428, + 849, + 513 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Input Text: \"Give me the segmentation map of the most salient objects in this image. 
Return resulting image by using image generation.\"", + "bbox": [ + 163, + 516, + 820, + 545 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/fd89b62d113f6d9cde9f7af5c15f0c3435fb8051b9a81f77a0195174a31e7771.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 156, + 546, + 321, + 642 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/53c762f53aba99798ebe7781772fd8e13f36de30319f71548adb22664211cf14.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 546, + 498, + 642 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/c0db26c61074cc97eac7d00434065dec4cfdf52edd3efc8af38cd021e086e4d3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 546, + 673, + 642 + ], + "page_idx": 69 + }, + { + "type": "image", + "img_path": "images/6b703d57fd8284c322132b56c18ad285d5a472bca055f06fdf9ddb620574ba3c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 546, + 849, + 642 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Input Text: \"Give me the segmentation map of the most salient objects in this image. Return resulting image by using image generation.\"", + "bbox": [ + 163, + 643, + 820, + 675 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 196, + 681, + 287, + 696 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 388, + 681, + 442, + 693 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 529, + 681, + 648, + 694 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "BiRefNet", + "bbox": [ + 728, + 681, + 797, + 694 + ], + "page_idx": 69 + }, + { + "type": "page_number", + "text": "70", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Image-to-X", + "bbox": [ + 196, + 215, + 292, + 231 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/f93d94cd33ec493cea22f6299c05435ef6df644e620ad03fc4906616201dd94b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 285, + 241, + 307, + 258 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Evaluation: Mirror Detection, Grouping and Shape.", + "text_level": 1, + "bbox": [ + 308, + 244, + 712, + 262 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/314ecf8346962cc266f1e67d8f88be07f8a8eaca1ca9d18ed756eea986ca6a35.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 165, + 270, + 316, + 387 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/8de34136e3817eb2b912dec2f52ae54240a4d61a51e8f5d08695f60f906cb9b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 268, + 493, + 386 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/3a21a982cd81980080fc0bfe53df60869cb5c77ea93c8479b28806aab9298ca1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 268, + 661, + 387 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/461d26dcff028be557e2efbcb1789125eb28fa946c8494b2dc6491aefc57fa30.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 271, + 831, + 386 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Input Text: \"Please segment all the mirror in the image and directly generate the output image.\"", + "text_level": 1, + "bbox": [ + 163, + 391, + 841, + 407 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": 
"images/cf3a11167038e14a620917c62c66c24cffe74d0bace22efc3b2051879d1ac82a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 165, + 412, + 316, + 531 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/1bc679f32fbc6bb7c76fac345ab8d2e03f52eb51f303c9abdaaec3738e3b8348.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 412, + 493, + 531 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/dcde7c31e72b7a5f000f1f4ce3071419d9f67361a8d18ec4966c3bb5f4513ab0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 412, + 661, + 531 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/83a5063b90e2d85ae8723b1c0527ebfeb79cf418f10b31ccefd38f2bc7514bf1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 412, + 831, + 531 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Input Text: \"Please segment all the mirror in the image and directly generate the output image.\"", + "text_level": 1, + "bbox": [ + 165, + 532, + 844, + 547 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/00cde007c7ba440020df1e4e47694ace493a34ef86885baa498937144d1106a2.jpg", + "image_caption": [ + "Figure 53: Task: Image to X: Mirror detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and VMD [107]. Observation: For simple cases, we find that GPT-4o can carry out mirror detection, as shown in the first example. For the complex scene, it cannot work as well as VMD." + ], + "image_footnote": [], + "bbox": [ + 165, + 554, + 315, + 672 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/ce4d2858b6a29f44fe079438ac4f53fe674726328c863a6f0a045947d1adac83.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 554, + 493, + 671 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/1d6b2ece4089619c8dc3c3901314db44c609491a2174435fb009e552ab953e21.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 554, + 658, + 671 + ], + "page_idx": 70 + }, + { + "type": "image", + "img_path": "images/1a402093b931cc216473bf07b0f3c0350d4d6dad3092d0e84036254347477cea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 554, + 831, + 671 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Input Text: \"Please segment all the mirror in the image and directly generate the output image.\"", + "text_level": 1, + "bbox": [ + 165, + 674, + 844, + 688 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 194, + 695, + 285, + 710 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 387, + 695, + 439, + 707 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 527, + 695, + 643, + 707 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "VMD", + "bbox": [ + 740, + 694, + 776, + 705 + ], + "page_idx": 70 + }, + { + "type": "page_number", + "text": "71", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Evaluation: Shadow Detection, Grouping and Shape.", + "text_level": 1, + "bbox": [ + 303, + 140, + 715, + 157 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/88eb6272e2083062c89d2da6799a7fbf0bebd2d74c908f4e8fab3e2024206b52.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 161, + 328, + 246 + 
], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/f723f0fa1807616a1f9fc4a326c92c0dbc8d41bfa409fc55bcd69f03cf0e4cc4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 161, + 500, + 247 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/50de3ca29f0a98439974b500ee80d55a3e4acb3f252738f8682accecf3be15e0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 161, + 671, + 247 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/ba72c57d88f3c19ba187e2c20d73fd9352c5b8f47d0b13557d72b3f97860362a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 161, + 843, + 247 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. Return the resulting image using image generation.\"", + "bbox": [ + 173, + 248, + 818, + 290 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/7968ea927b3c105ef0585b8c69f50258a2601dd8140b619a7c907020835befb5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 292, + 326, + 458 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/98fe6ae3af0e25ba30cef5b80dbf80b30b9cf636e59a4dadffc0a63864c71cdd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 292, + 500, + 459 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/7665c0eede3baa10374170e0e9e1926291832c424771b68766df8bf95ef3f8f9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 292, + 671, + 459 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/ce1b3984804538be38498c0aaa6fc99ea0991de8d1262b9961e409a6f9e0f3cb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 292, + 841, + 458 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. Return the resulting image using image generation.\"", + "bbox": [ + 173, + 460, + 818, + 502 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/9a09eb88416d90950b00ecbf953c22c1a7c50188a8cd661fe6744dcc97906d76.jpg", + "image_caption": [ + "Figure 54: Task: Image to X: Shadow detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SDDNet [21]. Observation: For more complex examples, both models detect both objects and their shadows with one mask output, as shown in the last two rows, leading to false positive predictions." 
+ ], + "image_footnote": [], + "bbox": [ + 165, + 503, + 328, + 671 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/df7e8259254f0cce7de2b44908edba93ace97aedc79647c2a02a490d81d15f6d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 503, + 500, + 671 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/1e842cfe1f25dd038d061f3ab7dd7da184ce007a8b89e10fd9297ee4bd1d051c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 503, + 671, + 671 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/1b8d6a02d0d6267d2cb4821bda728d32861a5521511305a19499be24186e0a29.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 503, + 841, + 671 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. Return the resulting image using image generation.\"", + "bbox": [ + 173, + 672, + 820, + 714 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/abb40d377e6294ce85ec725e10870be0d09e757df45ef1ac17d63772ebe95887.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 715, + 328, + 789 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/c5e565189347123ae969a22fa0002406f12393da53fa69379e6fbe9b7fbaddfa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 717, + 500, + 789 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/087970cb94a73330a9143d8515c6bdd8a8061b49291d5ee3caa0aa8911c73064.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 717, + 671, + 789 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/55eb18aa82b40bdb5d13022c1ffe68e93f0cf3359c85faa059e9e14d3e5ff8c3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 718, + 841, + 789 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. 
Return the resulting image using image generation.\"", + "bbox": [ + 173, + 791, + 818, + 833 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 205, + 835, + 294, + 849 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "GPT-40", + "bbox": [ + 393, + 835, + 447, + 848 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 532, + 835, + 647, + 848 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "SDDNet", + "bbox": [ + 730, + 835, + 789, + 847 + ], + "page_idx": 71 + }, + { + "type": "header", + "text": "Image-to-X", + "bbox": [ + 207, + 107, + 302, + 123 + ], + "page_idx": 71 + }, + { + "type": "page_number", + "text": "72", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 71 + }, + { + "type": "image", + "img_path": "images/0c35796bfccda9797611a7529116ead987ca3802f68e686dd8c44e01a6055d5c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 233, + 202, + 254, + 219 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Evaluation: Camouflage Object Detection, Grouping and Shape.", + "text_level": 1, + "bbox": [ + 258, + 205, + 761, + 222 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/f58d9105160e737da34562b6c9c09dca8743d388c9ac9c2f0b8894657da4a521.jpg", + "image_caption": [ + "Input Text: \"Give me the segmentation map of the crocodile in this image. Return resulting image by using image generation.\"" + ], + "image_footnote": [], + "bbox": [ + 155, + 231, + 323, + 325 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/f77b58aea411a06d091798fd19812f2d11b864849cf60dea1419dbb96bdeab4b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 231, + 496, + 325 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/36ba8480160ce7a11902f9d63c93a779ad81abc66d85c984fe46a402a46397ff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 231, + 671, + 325 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/628f0ed3fe5c2872259b8c53ed502d9c944f34e65615f8483a616fc2cad70310.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 231, + 848, + 325 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/b3d11b71a64fbc57f21f8ab46676a1e3eb0408ddfc762cacc7d3fc67d895124c.jpg", + "image_caption": [ + "Input Text: \"Give me the segmentation map of the fish in this image. Return resulting image by using image generation.\"" + ], + "image_footnote": [], + "bbox": [ + 156, + 362, + 321, + 460 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/73484faa2e3dc80819041f2a89b22046ef3ff187c2c48e51c3cfc6d540a16621.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 362, + 496, + 460 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/8a361a39ea1256599aa45e581bc05adce2bd4ba199154e81ac85761e04a5d305.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 362, + 671, + 460 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/24ab3a835dd0ae72fb511f6dddb64f64bce8bf595463eb6cff06f05aa41f50a1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 362, + 848, + 460 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/04a1ddeb73159d510035c4040bc6dea5a7b55b6b32df8201b3f02a9a20621b00.jpg", + "image_caption": [ + "Input Text: \"Give me the segmentation map of the fish in this image. 
Return resulting image by using image generation.\"" + ], + "image_footnote": [], + "bbox": [ + 156, + 497, + 321, + 558 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/0964f3c1bd27ee97370c3070e8b80b145333e5ecc74f44dff7f9bad2234476a9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 497, + 496, + 558 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/db93f8cdc75167415d8ec0a2d7ba0ce8bac4ffe433a2ff11a79751a9deb5cc51.jpg", + "image_caption": [ + "Figure 55: Task: Image to X: Camouflage object detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and BiRefNet [127]. Observation: Both GPT-4o and Gemini 2.0 Flash can detect and segment the camouflaged animals in simple cases. However, spatial misalignments still exist." + ], + "image_footnote": [], + "bbox": [ + 504, + 497, + 671, + 558 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/cc09b3c0d84509e3bc312aabe02473313c5d4fecd18fd590ee4eaa3bf1153770.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 497, + 848, + 558 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/50ee1a066627ca3b776f328349447b722e1083477451cb97c6b6bfb2c2054b04.jpg", + "image_caption": [ + "Input Text: \"Give me the segmentation map of the toad in this image. Return resulting image by using image generation.\"" + ], + "image_footnote": [], + "bbox": [ + 156, + 595, + 323, + 691 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/72211f5127f3ea905b058ad3d92ebb6f194bea63cc537e32cd09a144926d10d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 595, + 500, + 691 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/44a15d6fa321c083f59b0c39c8f37dd0512f34e39c60f7e70976d6279609306a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 595, + 671, + 691 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/ca7cc128d79c704808bc1637b90a08b777ee9b0baab8865ec66e337610c7be1e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 595, + 848, + 691 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 196, + 734, + 287, + 750 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 388, + 734, + 442, + 747 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 531, + 734, + 648, + 747 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "BiRefNet", + "bbox": [ + 728, + 734, + 795, + 747 + ], + "page_idx": 72 + }, + { + "type": "header", + "text": "Image-to-X", + "bbox": [ + 200, + 170, + 297, + 186 + ], + "page_idx": 72 + }, + { + "type": "page_number", + "text": "73", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "2.4.4 Depth Estimation", + "text_level": 1, + "bbox": [ + 127, + 90, + 305, + 107 + ], + "page_idx": 73 + }, + { + "type": "text", + "text": "The depth estimation task involves predicting the distance from the camera to objects within a scene. In this paper, we focus on monocular depth estimation, which takes a single image as input. In Figure 56, we compare GPT-4o, Gemini 2.0 Flash, and a recent SOTA method, Depth-Anything [114]. We first notice that Gemini cannot produce reasonable depth estimations. 
For GPT-4o, although it can output an appealing depth map visualization, we note that this output is only a grayscale visualization of depth and cannot be directly converted to per-pixel depth values. We show five test cases. In the first test case, we notice that GPT-4o is good at capturing fine details in images, which Depth-Anything may miss. Although we cannot directly verify the accuracy of the depth values, the visualization shows that the depth relationships between objects are accurate. What GPT-4o handles poorly is the background. Since the background in the image is the sky, common sense tells us that these areas are infinitely far from the camera; however, GPT-4o's depth map does not handle them correctly. GPT-4o performs similarly in the second, fourth, and fifth examples. Among them, we highlight the fourth test case, where, for buildings farther away, GPT-4o cannot effectively estimate the distance between each building and the camera. In the third example, the output of GPT-4o is very confusing: it completely misunderstands the depth relationships in the entire image. Therefore, we believe that the depth estimation performance of GPT-4o is still unstable.", + "bbox": [ + 125, + 114, + 872, + 324 + ], + "page_idx": 73 + }, + { + "type": "page_number", + "text": "74", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 73 + }, + { + "type": "text", + "text": "Image-to-X", + "text_level": 1, + "bbox": [ + 184, + 148, + 282, + 165 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/28038c53ecd04cd0c7f8a9e7042077716a5c924fd1bcd4bdb4dc296a5e381da1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 348, + 175, + 370, + 194 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "Evaluation: Depth Estimation", + "text_level": 1, + "bbox": [ + 372, + 179, + 627, + 196 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/c0365698147e513e49bef371ac74ee5fb93c63b6c1496ce9ac4635ce65cd901e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 140, + 203, + 313, + 292 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/607880b57a1f83f7d1721514860baaaa5af0252138afb508b72a65611fc2b893.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 203, + 493, + 292 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/5d70486874bdca6ec3533ff018df819168b3864b389e7def95b35ea1d5fd3941.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 203, + 678, + 292 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/1d5ece3f0e15c3834d92639316498ad868ae535068088c79e13b0ab393f5994b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 683, + 203, + 856, + 292 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the depth map prediction of this image.\"", + "bbox": [ + 158, + 292, + 692, + 310 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/96954093e4eb35d0ca710e8119a031cdc99bc1453c9ec711f72072006f7ffbbb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 140, + 315, + 313, + 405 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/c13099707bf9fe0d5399ee37e5da600f88c1036e16f352a6e9ded8e89a045d2e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 315, + 491, + 404 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": 
"images/4b28cc6dcc84678e3173e8fa682a9dc064ea0f9777f5f93daeba4307f5cb80ec.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 315, + 676, + 404 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/ab5d35fd29583376a884277394b92fa4599215c40d88cdcc94c6b9c9bd93f1ae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 315, + 854, + 405 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the depth map prediction of this image.\"", + "bbox": [ + 158, + 405, + 692, + 421 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/e822d81326618b0d941cb4c3271bef0fd0548efa0ef06244f8b36e4b001ccf7a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 140, + 425, + 313, + 513 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/76849dc2c4d82c6863264e63ab6e8135e1109cad0f3635b978fc71a1be56563d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 426, + 493, + 513 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/c1871f48314d2af2fd2dfd3ce2bf7c8ce6dd5d5432b0c3771e9206b1cb6dbbef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 426, + 676, + 513 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/40f481609f3f72c80c1bb3a1911a2bcc98e71fe948326192df5cb8ed5f01b0a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 426, + 856, + 513 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the depth map prediction of this image.\"", + "bbox": [ + 158, + 517, + 692, + 534 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/091b9e25759363e18a4a8027407ebff16dae2c1d7196bfd2671970b2dfdf18dc.jpg", + "image_caption": [ + "Figure 56: Task: Image to X: Depth estimation. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Depth-Anything [114]. Observation: We convert the depth map generated by Depth-Anything into a visualization map similar to GPT-4o. This evaluation shows that GPT-4o has the capability of distinguishing the depth relationship of different parts in the image, but its understanding of the background is insufficient." 
+ ], + "image_footnote": [], + "bbox": [ + 140, + 537, + 313, + 622 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/eb86efcba3a9c240da3e722bda20727de1f615bfe422fab64ff3b091cd85df06.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 537, + 493, + 622 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/042e2b1b154968fc9607cbadf6e30a37999a81fe9a06bc6f75a82aeaafc4d979.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 537, + 676, + 621 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/d0fbcde1d1f0ba76a9c49ab60586f0a6bd39c2d0d6c38d24f1164c0b482dacaa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 537, + 856, + 622 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the depth map prediction of this image.\"", + "bbox": [ + 161, + 623, + 694, + 638 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/7b4abe90aad35a4b1cd796581e661c9ef27e071aff9b1800f3b7341066fed935.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 140, + 643, + 313, + 732 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/2f8dd6355dcb71362e42158e49ed0aac5ddd056ed4d4516867721782b9b2a0b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 643, + 493, + 732 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/07fe029890bbeded3efc202fc24a6394fb11dda52778c7ad9cd6a134088f6ae9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 643, + 676, + 732 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/cce42e0f3762b76b5639e2686fa7e95138a6dafc58db634f3dc3c0d47aebd717.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 643, + 854, + 732 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "Input Text: \"Please generate the depth map prediction of this image.\"", + "bbox": [ + 161, + 733, + 697, + 750 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 181, + 757, + 279, + 773 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "GPT-40", + "bbox": [ + 380, + 760, + 439, + 773 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 524, + 760, + 651, + 775 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "Depth-Anything", + "bbox": [ + 707, + 760, + 833, + 776 + ], + "page_idx": 74 + }, + { + "type": "page_number", + "text": "75", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "2.4.5 Normal Estimation", + "text_level": 1, + "bbox": [ + 127, + 90, + 316, + 104 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "The surface normal estimation task involves predicting the orientation of surfaces at each pixel in an image, typically represented as 3D vectors. In Figure 57, we compare GPT-4o, Gemini 2.0 Flash, and Marigold normals [48]. The results show that GPT-4o can generate reasonable results. However, since GPT-4o's output is an appealing normal map visualization, we want to clarify that this output is a color-coded visualization and does not directly provide the exact normal vector for each pixel. Thus, we cannot use lighting or other methods to verify the accuracy of the normal maps, and downstream tasks cannot use the output results. However, we also find some unreasonable details. 
In the third test case, common sense suggests that the ground should be flat, but GPT-4o predicts normals for these textured areas that differ from the surrounding areas.", + "bbox": [ + 125, + 114, + 870, + 227 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "Image-to-X", + "text_level": 1, + "bbox": [ + 214, + 252, + 310, + 268 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "Evaluation: Consistency/accuracy.", + "text_level": 1, + "bbox": [ + 375, + 282, + 648, + 299 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/652932d800457a58f1281b3ba28fa3df0773930a60325d39e942f44b98045c8e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 301, + 305, + 436 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/e099a5ea018e11845a670b91da9b9e084bcfb35931d3f4ead8ac754370ad78b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 145, + 438, + 307, + 609 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/946a58c00f3ff3a1895e1b765823d31d38f03bbcd704ed7183effd358ebae58a.jpg", + "image_caption": [ + "Input Text: \"Generate the surface normal map of this picture.\"" + ], + "image_footnote": [], + "bbox": [ + 148, + 612, + 305, + 760 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/583b65ff347f460c795a71a3cedff21e9e48ec2217a26a74271927b6aa96f5d5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 301, + 480, + 436 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/e4c0179dfb831279ce07771cfe8cbac48857d6e750c966acbf75a9fd4eaf0a65.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 438, + 480, + 609 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/396cc4efe9e599f8f4c4b1bad0da4de3244d5eeed20ffeb09471237218d5adde.jpg", + "image_caption": [ + "Figure 57: Task: Image to X: Normal estimation. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Marigold [48]. Observation: This evaluation shows that GPT-4o has the capability of generating a visualization map of the surface normal, but the understanding of the details is still insufficient." 
+ ], + "image_footnote": [], + "bbox": [ + 325, + 612, + 480, + 761 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/9d509d18edc90347ae9f6dcacd7565656a476da05b9ef46fdf5918a71a428c43.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 301, + 656, + 436 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/d2aadb5f85512222f6151124f2d78a026acca8ece755c80d998f65be5f2e8ae8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 439, + 656, + 609 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/5d604baed8d657358ea3a3aa09a0796cbab115ba03ae7dc21bef555277a6b92c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 612, + 656, + 761 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/e324eb93d8c4dfceef9d783883e1f613acfba48879633c0d04fd8039c2f4ba20.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 300, + 836, + 436 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/c748048db9bc4e6543e0723fd2782186695eaa29379aa62ac724941790f330f9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 439, + 836, + 609 + ], + "page_idx": 75 + }, + { + "type": "image", + "img_path": "images/69d386fd0754ccef0b8aa405ddd5368febd5edd73da8504508220e1ee0fb3ce3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 612, + 836, + 762 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 183, + 792, + 276, + 808 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 364, + 792, + 419, + 806 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 506, + 792, + 625, + 806 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "Marigold", + "bbox": [ + 727, + 792, + 792, + 808 + ], + "page_idx": 75 + }, + { + "type": "page_number", + "text": "76", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "2.4.6 Layout Detection", + "text_level": 1, + "bbox": [ + 127, + 90, + 302, + 104 + ], + "page_idx": 76 + }, + { + "type": "text", + "text": "The layout detection task requires the model to identify structural components (e.g., titles, paragraphs, tables, images) in the given image. In Figure 58, we compare the performance of GPT-4o, Gemini 2.0 Flash, and LayoutLMV3 [44] on the layout detection task. In the test cases, GPT-4o hallucinates layout elements that do not exist, although the final output is another document with \"layout detection\" results. If we consider the use in downstream tasks, such results are meaningless. Therefore, we conclude that GPT-4o is not capable of the layout detection task.", + "bbox": [ + 125, + 114, + 870, + 198 + ], + "page_idx": 76 + }, + { + "type": "text", + "text": "Image-to-X", + "text_level": 1, + "bbox": [ + 191, + 220, + 289, + 236 + ], + "page_idx": 76 + }, + { + "type": "image", + "img_path": "images/4ef72f42ea0ddf4ee1ec0367601e96f40b0391ec4c9fa27f967f8b50716f6305.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 247, + 372, + 263 + ], + "page_idx": 76 + }, + { + "type": "text", + "text": "Evaluation: Document Detection.", + "text_level": 1, + "bbox": [ + 375, + 251, + 643, + 266 + ], + "page_idx": 76 + }, + { + "type": "image", + "img_path": "images/2e8258faee6378598f378a9a63923d7d64ed24c4a8d936ba6407f7a1752ce76a.jpg", + "image_caption": [ + "Figure 58: Task: Image to X: Layout detection. 
Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and LayoutLMV3 [44]. Observation: The results show that GPT-4o and Gemini frequently generate a different document, albeit with a seemingly correct detected layout." + ], + "image_footnote": [], + "bbox": [ + 147, + 276, + 316, + 412 + ], + "page_idx": 76 + }, + { + "type": "image", + "img_path": "images/235e18e587aa841842c1dc8ffd53a330c1345d64b6c3875f51db1f215dc6fc92.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 333, + 276, + 464, + 412 + ], + "page_idx": 76 + }, + { + "type": "image", + "img_path": "images/37e0cb1ddf2d5338758f440584a48a5f50fe2d534e9b2486a05a7690ed334ed9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 478, + 276, + 640, + 388 + ], + "page_idx": 76 + }, + { + "type": "image", + "img_path": "images/473d5c4fa40852da1be956bd91fc8dbb01033d977f73e000514033c57935eebb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 653, + 276, + 846, + 388 + ], + "page_idx": 76 + }, + { + "type": "text", + "text": "Input Text: \"Generate a new image which contains the layout detection results of the input image.\"", + "bbox": [ + 148, + 417, + 857, + 434 + ], + "page_idx": 76 + }, + { + "type": "image", + "img_path": "images/36374494cf58c80bf10d518e18df6bb7c26dfed3007927ce329edde282b717d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 148, + 450, + 312, + 523 + ], + "page_idx": 76 + }, + { + "type": "image", + "img_path": "images/e80a0e37e52eb35595a7df3a6aa08948e175cfb951ba4bd65e8b98ab1887d928.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 316, + 449, + 480, + 527 + ], + "page_idx": 76 + }, + { + "type": "image", + "img_path": "images/b2bbda9be8e2840ce6f8b02ad35c98e1401b5728eb36e05879ea84bdd7c2ff9f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 483, + 449, + 663, + 529 + ], + "page_idx": 76 + }, + { + "type": "image", + "img_path": "images/d7357528516c7245255631ca4698524569152aafd5239b1a9cd9dfa6ee24c212.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 449, + 852, + 527 + ], + "page_idx": 76 + }, + { + "type": "text", + "text": "Input Text: \"Generate a new image which contains the layout detection results of the input image.\"", + "bbox": [ + 148, + 532, + 856, + 549 + ], + "page_idx": 76 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 179, + 558, + 271, + 573 + ], + "page_idx": 76 + }, + { + "type": "text", + "text": "GPT-4o", + "bbox": [ + 375, + 558, + 428, + 570 + ], + "page_idx": 76 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 516, + 558, + 635, + 570 + ], + "page_idx": 76 + }, + { + "type": "text", + "text": "LayoutLMV3", + "bbox": [ + 712, + 558, + 803, + 571 + ], + "page_idx": 76 + }, + { + "type": "page_number", + "text": "77", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 76 + }, + { + "type": "text", + "text": "2.4.7 Text Detection", + "text_level": 1, + "bbox": [ + 127, + 90, + 282, + 104 + ], + "page_idx": 77 + }, + { + "type": "text", + "text": "The text detection task requires the model to detect the text in the given image. In Figure 59, we compare the performance of GPT-4o, Gemini 2.0 Flash [99], and CRAFT [3] on the text detection task. We observe that CRAFT outperforms the other models.", + "bbox": [ + 125, + 114, + 869, + 157 + ], + "page_idx": 77 + }, + { + "type": "text", + "text": "In the first test case, GPT-4o demonstrates comparable performance to CRAFT. 
However, in the other cases, GPT-4o repeatedly generates nonexistent text and labels it as a \"text area\". This issue becomes particularly evident in cluttered scenes or images with complex backgrounds. These false positives not only reduce detection precision but also make the output less reliable for downstream tasks such as OCR or document understanding. On the other hand, Gemini does not generate nonexistent text but tends to over-predict some areas as text areas.", + "bbox": [ + 125, + 162, + 869, + 233 + ], + "page_idx": 77 + }, + { + "type": "page_number", + "text": "78", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 77 + }, + { + "type": "text", + "text": "Image-to-X", + "bbox": [ + 196, + 136, + 292, + 154 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/80922918a65d3be2dc64726ee8e795ae73b3e97479ae0a61fb7cbf01bf4cb9c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 380, + 161, + 400, + 178 + ], + "page_idx": 78 + }, + { + "type": "text", + "text": "Evaluation: Text Detection.", + "bbox": [ + 403, + 165, + 619, + 181 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/8d12c4277ccc433463d916e0d4c021703de904c087034505189b0c4f6bee4dc4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 151, + 189, + 320, + 290 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/d79345dd614218dc4a3fbadc34f3efbc6475c52df944a483358394a2ae83a4e5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 190, + 509, + 290 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/3b08ebeb2ba10b1056a1d7752ce27e2b4990f6b98a2f7acdbabcd32480037452.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 191, + 679, + 290 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/0a75ab15149c9afdb4641c46e30bd50461202b7c88fa785bef866a2691d27f84.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 191, + 848, + 289 + ], + "page_idx": 78 + }, + { + "type": "text", + "text": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"", + "bbox": [ + 158, + 297, + 821, + 315 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/12fe1a849b28c956a4b37361bf45581ff6000a0b09f3ab5787b4647bbd9a3831.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 153, + 332, + 330, + 410 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/2ea1368e7787c97c60d6731352894c9323ed501fb6dc4e2d3cb1aa2c697b91f0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 332, + 486, + 410 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/5fd98c00acb24bb685d7ae786468c021486683a40b42e8431c28d0d29da3cf89.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 332, + 663, + 410 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/05076e4a3056272c3ce291b647bd2b2098f8bef74c7807b41ab1d48af70b1f0e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 333, + 844, + 411 + ], + "page_idx": 78 + }, + { + "type": "text", + "text": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"", + "bbox": [ + 158, + 414, + 821, + 430 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/f2a66d298cf6681e639f60487d91eb2376e9527b2385d109001392557fd3fc3c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 153, + 438, + 326, + 614 
+ ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/0ceb9d3c24c10622601c5cd8f2bfe1590592e3f441bd27e97f56e47c7c523543.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 328, + 438, + 503, + 614 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/9b26a02ec460020f269c9e957ef3f51615c8aec20faba332c7ad88bddff5fcea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 438, + 674, + 614 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/a4d89254c371d49daaab63ab7c798533a278ce2f2d6e10bca6da553223761a32.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 438, + 851, + 614 + ], + "page_idx": 78 + }, + { + "type": "text", + "text": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"", + "bbox": [ + 158, + 626, + 821, + 643 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/023c34de6dd441f99db1b377780b4fdcd6666f8679ab43c519b68c515e5d0add.jpg", + "image_caption": [ + "Figure 59: Task: Image to X: Text detection. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and CRAFT [3]. Observation: The results show that GPT-4o frequently generates text that does not exist." + ], + "image_footnote": [], + "bbox": [ + 153, + 654, + 346, + 762 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/9d85af9012642e27d9952f9795e008d868506f471396fa6b593f956b82ca6fd0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 667, + 482, + 744 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/29eecd09cc58857aaac407a762e3d2472c1aae1724d08052e5c17f20df19b893.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 488, + 654, + 669, + 756 + ], + "page_idx": 78 + }, + { + "type": "image", + "img_path": "images/d4be8e3b705492a093a2511d862d76f96fe11049428146f4bcc9bedfb9524da6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 655, + 844, + 756 + ], + "page_idx": 78 + }, + { + "type": "text", + "text": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"", + "bbox": [ + 161, + 768, + 821, + 785 + ], + "page_idx": 78 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 184, + 791, + 277, + 806 + ], + "page_idx": 78 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 379, + 791, + 433, + 805 + ], + "page_idx": 78 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 519, + 791, + 638, + 805 + ], + "page_idx": 78 + }, + { + "type": "text", + "text": "CRAFT", + "bbox": [ + 727, + 791, + 779, + 805 + ], + "page_idx": 78 + }, + { + "type": "page_number", + "text": "79", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 78 + }, + { + "type": "text", + "text": "2.4.8 Object Tracking", + "text_level": 1, + "bbox": [ + 127, + 90, + 295, + 107 + ], + "page_idx": 79 + }, + { + "type": "text", + "text": "The object tracking task requires the model to continuously locate and follow the specific object across the frames in a video sequence. We test the multi-object tracking, which requires the model to track several objects concurrently. We test four cases (Figure 60, 61, 62, 63). We compare GPT-4o, Gemini 2.0 Flash, and a recent SOTA method SAM-2 [86]. Our first observation is that GPT-4o seems unable to generate images that are consistent with the original image. This may be related to the nature of its generative model. 
Even setting this aside, SAM-2 still performs better on the tracking task, while GPT-4o exhibits problems such as failing to maintain consistent tracking of the target, frequently drifting, or losing the object entirely. In Figure 60, the output of GPT-4o generally demonstrates the ability to track objects, but there are also some defects. For example, in the last picture generated by GPT-4o, a new object is even fabricated from the existing ones. We speculate that this is caused by the influence of the conversation context. In Figure 61, GPT-4o outputs some content that should not be in the output, such as the \"caf\" tag. In Figure 62, GPT-4o can track a relatively simple object, but it fuses two separate objects. In Figure 63, GPT-4o lacks the capability to track in the dense scenario.", + "bbox": [ + 124, + 114, + 870, + 282 + ], + "page_idx": 79 + }, + { + "type": "page_number", + "text": "80", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 79 + }, + { + "type": "text", + "text": "Evaluation: Object Tracking, Matching and Video Analysis.", + "text_level": 1, + "bbox": [ + 251, + 130, + 740, + 150 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/64bca9f9e552b5aa559f2be8f97c1db4addda01d6db566245d3a344dc004fa2b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 155, + 316, + 276 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/4c521d5dabb23441105ce25f3e4ea4fe5cdfd75cbe5ba899f0fa357d7cbf73e0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 156, + 488, + 276 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/eabf80aab7589d05f040c765b5655db13fd6ccc0f85020e3be5daad50092f0f9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 156, + 665, + 276 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/075019b013e4717772a6d76e14fbaf9862c46b4f8223096c86fa73cb18a7fffb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 156, + 838, + 276 + ], + "page_idx": 80 + }, + { + "type": "text", + "text": "Input Text: \"This is the first frame of a video where I've marked four targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these four targets. Understood?\"", + "bbox": [ + 158, + 277, + 826, + 321 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/03ac5a08165418ccecfd0363ad1b72667a075da1b20d2f84dff1cf04027bdc79.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 161, + 327, + 320, + 448 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/6075fec13975411e989e0044e4a2f0ee1c763038760b1eee8ab0a205a97ce7fd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 327, + 490, + 448 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/2ed33745b63fd77dc89a8024a98963f50198ec8a1e52ceda4ab8062eb4b2085d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 327, + 665, + 448 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/9dff828e49740629bf2ee9f75d69169e08193f97717d5fed56fe0f6c1a5bcc42.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 327, + 838, + 448 + ], + "page_idx": 80 + }, + { + "type": "text", + "text": "Input Text: \"You now need to perform object tracking on the four targets in this image and draw the detected bounding boxes on them. 
Please provide me directly with the final output image. Return result image by using image generation.\"", + "bbox": [ + 158, + 450, + 807, + 494 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/679acf7cda0f660094595d465faedbcf9b28ca58cbf88966cb6e491aa278d75a.jpg", + "image_caption": [ + "Figure 60: Task: Image to X: Object tracking, matching, and video analysis (1/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." + ], + "image_footnote": [], + "bbox": [ + 161, + 498, + 318, + 619 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/770b79989d7eeca4a1a1dfe41749ee7619d15b0809497750a4ef3f48ae31bf1c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 498, + 488, + 619 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/10f93dc20ac60dddd7d9e915fac07726f076d66e2f81a27aa151917e0e62abcf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 498, + 665, + 619 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/8fc6d53c4cd8581616940efb6ac02413d37aece5c9462fc0de359572bfb0d570.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 500, + 838, + 619 + ], + "page_idx": 80 + }, + { + "type": "text", + "text": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"", + "bbox": [ + 158, + 623, + 818, + 667 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/5d220158cfe75b892d7a3627f6bbfe90c6327f1ca348ff2386cb8e06bc0bd166.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 161, + 671, + 318, + 791 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/e5aa20f742421c44599c060ff033d5084778874fc965254ee47e49189abed76d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 671, + 488, + 791 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/9a9ede0bf4fe178006bfdab612511041503aa542b158bd5a1cafebee66aa0d19.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 671, + 665, + 791 + ], + "page_idx": 80 + }, + { + "type": "image", + "img_path": "images/4cb8d78d25db9472f21ac15da10a306c42418c98f395c4de73dab05c66763de4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 671, + 838, + 791 + ], + "page_idx": 80 + }, + { + "type": "text", + "text": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"", + "bbox": [ + 158, + 800, + 818, + 844 + ], + "page_idx": 80 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 191, + 849, + 282, + 866 + ], + "page_idx": 80 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 383, + 849, + 436, + 863 + ], + "page_idx": 80 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 527, + 849, + 645, + 863 + ], + "page_idx": 80 + }, + { + "type": "text", + "text": "SAM2", + "bbox": [ + 745, + 849, + 790, + 862 + ], + "page_idx": 80 + }, + { + "type": "header", + "text": "Image-to-X", + "bbox": [ + 197, + 99, + 295, + 114 + ], + "page_idx": 80 + }, + { + "type": "page_number", + "text": "81", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 80 + }, + { + "type": "text", + "text": "Image-to-X", + "text_level": 1, + "bbox": [ + 197, + 183, + 295, + 200 + ], + "page_idx": 81 + }, + { + "type": "text", + "text": "Evaluation: Object Tracking, Matching and Video Analysis.", + "text_level": 1, + "bbox": [ + 251, + 212, + 741, + 231 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/123381a94924d4ad9fab326af061828f48aab18ef9bee3847f59fc56015e22fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 241, + 320, + 316 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/17ab50440ba1a2025044fd3cd0741266c996928780e88a97d4c4333b56dfbe5a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 241, + 491, + 315 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/b44b50f9572bbd72865bce64600e8f10dc36affa85e764b23905c4db84176b3f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 241, + 668, + 316 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/1bdda49b07b096341fdec95b3c342e834c5c3255bfacb631d015bf9dcbf57d2b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 241, + 841, + 315 + ], + "page_idx": 81 + }, + { + "type": "text", + "text": "Input Text: \"This is the first frame of a video where I've marked three targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these three targets. Understood?\"", + "bbox": [ + 158, + 316, + 812, + 359 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/f266fde975c8808d3f3339c241e64e53840be40a286abf4b782d7dec9c606c51.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 161, + 363, + 320, + 434 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/6bd20b3ebf4a99d1a659fb4acf85915aae6729d1dae0be2eba9584761c9e9db7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 363, + 488, + 435 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/19ee65abb68b04bcc6de8f3231bd1dfc53040c57e2774f2a64269f4ba9efb4df.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 363, + 666, + 435 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/2864e1c72caa29f54357f917cad03463a7b5f8e666314b153768cc9c9bd444d6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 363, + 839, + 435 + ], + "page_idx": 81 + }, + { + "type": "text", + "text": "Input Text: \"You now need to perform object tracking on the three targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"", + "bbox": [ + 158, + 436, + 815, + 479 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/17281215ee551aee19132f26e2f57e9634fc4c01686dbd233f99e38afdc31bac.jpg", + "image_caption": [ + "Figure 61: Task: Image to X: Object tracking, matching, and video analysis (2/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." + ], + "image_footnote": [], + "bbox": [ + 161, + 483, + 320, + 555 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/dd4f8ceb102f253e19dc4e99a539f2e28701caecb904ac3596b116d8f9213bed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 483, + 488, + 555 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/288c7e6c2ba7c4828a38dbb2d93551531e7662268ca80529efd05e46c3b9cdc5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 483, + 666, + 555 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/b50fd5a292c370336aaa3e339bafb070bea7a7a2f80c934dad264af03b23bb2d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 483, + 839, + 555 + ], + "page_idx": 81 + }, + { + "type": "text", + "text": "Input Text: \"Continue tracking the three targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"", + "bbox": [ + 158, + 556, + 818, + 599 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/f835ef19b16a59831866bcf40d9d9bb5ac713048e65f176650e206cc3a2dd8ec.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 161, + 603, + 320, + 675 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/0be63390ebc394d88f7518d11a9497eca8059c414bc31788dd9f9bad0ee7ee64.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 603, + 488, + 675 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/b09f487669528fc2963ebabd0310ed69c5b420b7512e220fe3465e3c2ed5e12b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 603, + 666, + 675 + ], + "page_idx": 81 + }, + { + "type": "image", + "img_path": "images/1f674c14a05985ac60abfb2449ee82311b3fd74a5ce3e0baab98c7fbcc2f60c0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 679, + 603, + 836, + 675 + ], + "page_idx": 81 + }, + { + "type": "text", + "text": "Input Text: \"Continue tracking the three targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"", + "bbox": [ + 158, + 678, + 818, + 723 + ], + "page_idx": 81 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 192, + 729, + 285, + 744 + ], + "page_idx": 81 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 383, + 729, + 436, + 742 + ], + "page_idx": 81 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 526, + 729, + 643, + 742 + ], + "page_idx": 81 + }, + { + "type": "text", + "text": "SAM2", + "bbox": [ + 735, + 729, + 781, + 742 + ], + "page_idx": 81 + }, + { + "type": "page_number", + "text": "82", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 81 + }, + { + "type": "text", + "text": "Image-to-X", + "text_level": 1, + "bbox": [ + 197, + 220, + 294, + 236 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/2d588085b2425d35e86ead8fd151a9f1b0a00f306e88ed5971910bddaaa8e8b1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 251, + 247, + 271, + 265 + ], + "page_idx": 82 + }, + { + "type": "text", + "text": "Evaluation: Object Tracking, Matching and Video Analysis.", + "text_level": 1, + "bbox": [ + 272, + 251, + 738, + 268 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/a1e959570f347e3fef4bcc4ccda36cec01a4c6a5cdd272c2a85fbb3cc2dea20d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 273, + 321, + 328 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/c2af40c5244e0f72a0326fc6c92a7e15810a0e852285d96d92e3191254cf5eaa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 273, + 493, + 329 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/ddca6e48a5804db47d5999c3e273c4763a7ecac6ea67d75a092e6a71ab128b72.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 273, + 665, + 329 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/946427a8be6f45ad8290603f01d85b6ddddd4e891e723686e136a789ad2d67dc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 273, + 838, + 329 + ], + "page_idx": 82 + }, + { + "type": "text", + "text": "Input Text: \"This is the first frame of a video where I've marked four targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these four targets. Understood?\"", + "bbox": [ + 156, + 330, + 825, + 375 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/9e99b81570d109732b42b178967ae11e9ed7d965809fd7f2fd60d19ccbc7cd6d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 378, + 321, + 433 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/d39af4eb408498b81d98d8cb2ffed5b472bc1b7317c2c8a5374a4a56e4dfb056.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 378, + 493, + 433 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/9682ba919f23fd2efddf43910798451e8e7f75415d38ed0e818f82b242e57922.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 378, + 665, + 433 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/1cf0ebdbd8d076d3769a4ec23e0b1f57389127e1424e5db6a56d16c052155351.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 378, + 838, + 433 + ], + "page_idx": 82 + }, + { + "type": "text", + "text": "Input Text: \"You now need to perform object tracking on the four targets in this image and draw the detected bounding boxes on them. 
Please provide me directly with the final output image. Return result image by using image generation.\"", + "bbox": [ + 156, + 435, + 808, + 478 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/f90e61bd1f023e6f33aef6353e15aca3f50253a83831ca3012b01fddcbeee732.jpg", + "image_caption": [ + "Figure 62: Task: Image to X: Object tracking, matching, and video analysis (3/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." + ], + "image_footnote": [], + "bbox": [ + 158, + 479, + 321, + 532 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/cb1c6099278ab7785abb32f0832616b8e30bed5c7c7f8049237a2dfc3dbc2c4a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 479, + 493, + 532 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/c437cfe35c54814ac08dea9c46d0e6240fb84f38e9174f214d7a634c29fe2495.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 479, + 666, + 532 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/4973b35320e8a2b303648fca1abc0044f2a37f44f7b47f898eb02a0074a2a7ba.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 479, + 838, + 532 + ], + "page_idx": 82 + }, + { + "type": "text", + "text": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"", + "bbox": [ + 156, + 536, + 818, + 580 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/85f7bef2b593291c46513388cafafe9023a5e4f53bc7397e3a888a28d2cda80d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 583, + 321, + 638 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/a5b89b647f49cafe186c5a2f230a7d551fedbe9a4b6ab523991f91c5baceb76f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 583, + 493, + 638 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/21754ab30160b46ae72d81bb54a4d2c4f2af0b2cb67b22e08bf3c41755442754.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 583, + 666, + 638 + ], + "page_idx": 82 + }, + { + "type": "image", + "img_path": "images/d0e994754d3ed22f81935820cf17a41860327c42094e23bb6b7a579509ca0543.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 583, + 838, + 638 + ], + "page_idx": 82 + }, + { + "type": "text", + "text": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"", + "bbox": [ + 156, + 642, + 818, + 686 + ], + "page_idx": 82 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 197, + 691, + 289, + 707 + ], + "page_idx": 82 + }, + { + "type": "text", + "text": "GPT 40", + "bbox": [ + 393, + 691, + 444, + 705 + ], + "page_idx": 82 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 526, + 691, + 643, + 705 + ], + "page_idx": 82 + }, + { + "type": "text", + "text": "SAM2", + "bbox": [ + 736, + 693, + 782, + 705 + ], + "page_idx": 82 + }, + { + "type": "page_number", + "text": "83", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 82 + }, + { + "type": "text", + "text": "Image-to-X", + "text_level": 1, + "bbox": [ + 197, + 188, + 294, + 205 + ], + "page_idx": 83 + }, + { + "type": "text", + "text": "Evaluation: Object Tracking, Matching and Video Analysis.", + "text_level": 1, + "bbox": [ + 251, + 214, + 741, + 233 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/9801dce9b0ee5070b386427554ed58b6a513f89be080d33cee85238e85084ce4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 241, + 321, + 314 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/47c0aa44b5c1e668ed4e267b28409511fe44987d1f237b43ee033cba301e51ee.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 241, + 493, + 314 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/c5d213403e828619e550ddf5ada406e961fa2efb34275366ed904f09ea4b9216.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 241, + 666, + 314 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/13971dbe9a9a59527de67d029db2291646e369394e8bee5fe2ff780074999bab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 241, + 838, + 314 + ], + "page_idx": 83 + }, + { + "type": "text", + "text": "Input Text: \"This is the first frame of a video where I've marked six targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these six targets. Understood?\"", + "bbox": [ + 156, + 315, + 815, + 357 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/4f4a54c95a2c61f0b917046158f22c0ac2fb4eaa7c0013af0028c8b141cd8788.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 160, + 359, + 321, + 431 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/11029002e14154e37a105bc63a9acff74ed50d918df4d80c2a9f039305b6e741.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 359, + 493, + 431 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/915695ce6be4cdda6f09d7deabc426642440f267759651fc6ec01c427928bd50.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 359, + 665, + 431 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/2b470b9a2e3376254260fb3776461599c8d766e1b076eaec1370e72b4b2e5b06.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 359, + 836, + 431 + ], + "page_idx": 83 + }, + { + "type": "text", + "text": "Input Text: \"You now need to perform object tracking on the six targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"", + "bbox": [ + 156, + 433, + 808, + 476 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/4610ae5599e3f85d65d10713443204f4af850e0751404e619cfc018f179db18b.jpg", + "image_caption": [ + "Figure 63: Task: Image to X: Object tracking, matching, and video analysis (4/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." + ], + "image_footnote": [], + "bbox": [ + 160, + 479, + 321, + 551 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/17df35e7ffc6198a7ee15be18d8062a7c6c3846eac54441a5167179ad1116281.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 479, + 493, + 551 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/3979db3e010b09be23fdd79665b9946d9fa4adc89d8766528d7afd7b653cf603.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 479, + 666, + 551 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/136b68479499a83964864cd84b1bd318f9fd41257d6d18e9f8d976b00b3c4581.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 479, + 836, + 551 + ], + "page_idx": 83 + }, + { + "type": "text", + "text": "Input Text: \"Continue tracking the six targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"", + "bbox": [ + 156, + 554, + 818, + 598 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/ee95015664fc94518401d21a2920ffd79e12d1ce90aaee7e7c2e23d8fb252353.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 161, + 601, + 321, + 672 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/9bb8f63af64ed04966c8891cf4cf6ec49bd44f0d1a11eca0e431b45d7918994e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 601, + 493, + 672 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/b2936841b119f7c4783c46da417c26650df828319071a28b934df3513e885121.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 601, + 666, + 672 + ], + "page_idx": 83 + }, + { + "type": "image", + "img_path": "images/a093ca2444e0583bf58d16a259862c8f04e828919d40f21a081d04a7116fc357.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 601, + 836, + 672 + ], + "page_idx": 83 + }, + { + "type": "text", + "text": "Input Text: \"Continue tracking the six targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"", + "bbox": [ + 156, + 674, + 818, + 717 + ], + "page_idx": 83 + }, + { + "type": "text", + "text": "Input Image", + "bbox": [ + 197, + 724, + 290, + 739 + ], + "page_idx": 83 + }, + { + "type": "text", + "text": "GPT 4o", + "bbox": [ + 385, + 724, + 439, + 737 + ], + "page_idx": 83 + }, + { + "type": "text", + "text": "Gemini 2.0 Flash", + "bbox": [ + 526, + 724, + 643, + 737 + ], + "page_idx": 83 + }, + { + "type": "text", + "text": "SAM2", + "bbox": [ + 733, + 724, + 779, + 737 + ], + "page_idx": 83 + }, + { + "type": "page_number", + "text": "84", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 83 + }, + { + "type": "text", + "text": "3 Limitations", + "text_level": 1, + "bbox": [ + 127, + 89, + 259, + 104 + ], + "page_idx": 84 + }, + { + "type": "text", + "text": "Although GPT-4o demonstrates impressive capabilities across a wide range of image generation tasks, several limitations remain. These challenges highlight key areas for future improvement in developing unified foundation models for vision-language generation.", + "bbox": [ + 125, + 121, + 869, + 165 + ], + "page_idx": 84 + }, + { + "type": "text", + "text": "3.1 Inconsistent Generation", + "text_level": 1, + "bbox": [ + 127, + 181, + 336, + 195 + ], + "page_idx": 84 + }, + { + "type": "text", + "text": "While GPT-4o often produces high-quality and semantically relevant images conditioned on textual prompts, it occasionally exhibits inconsistencies. Specifically, the model may generate visually compelling outputs that deviate from the precise semantic cues of the input image, such as object count, spatial layout, specific shapes, or designated colors. These inconsistencies are especially problematic in tasks requiring partial image editing or compositional accuracy. Notably, such issues are less common in diffusion-based models or discrete denoising architectures like MaskGIT [11, 6], suggesting that GPT-4o operates under a distinct generative paradigm with inherent trade-offs in fidelity and control.", + "bbox": [ + 125, + 207, + 867, + 305 + ], + "page_idx": 84 + }, + { + "type": "text", + "text": "3.2 Hallucination", + "text_level": 1, + "bbox": [ + 127, + 321, + 264, + 335 + ], + "page_idx": 84 + }, + { + "type": "text", + "text": "GPT-4o is also susceptible to hallucinations—producing content that is logically implausible, semantically inconsistent, or factually incorrect. These include fabricating non-existent objects or geographical features (e.g., imaginary islands or landmarks), and misrepresenting relationships between entities. Such errors are particularly prevalent in complex or underspecified prompts, where the model appears to rely on internal priors rather than grounded world knowledge. While hallucination is a common challenge across generative models, it poses notable limitations for real-world applications demanding precision, such as education, medical illustration, or scientific visualization.", + "bbox": [ + 125, + 348, + 867, + 434 + ], + "page_idx": 84 + }, + { + "type": "text", + "text": "3.3 Data Bias", + "text_level": 1, + "bbox": [ + 127, + 450, + 238, + 463 + ], + "page_idx": 84 + }, + { + "type": "text", + "text": "Despite strong alignment between text and vision modalities, GPT-4o struggles with a data bias issue, failing to generate underrepresented cultural elements and to render non-Latin scripts such as Chinese, Japanese, and Arabic. The generated characters are often incomplete, distorted, or replaced with Latin-like approximations.
These artifacts reflect underlying challenges in multilingual representation, likely due to limited exposure to diverse scripts during training and the inherent difficulty of accurate typographic rendering in pixel space. This phenomenon is emblematic of a larger issue in AI systems—data bias. The training data used to develop models like GPT-4o may disproportionately represent certain languages, cultures, and writing systems, leading to disparities in performance across different linguistic groups. These biases are not only technical limitations but also ethical concerns, as they can contribute to the exclusion of underrepresented languages and cultures from AI applications. As vision-language models are increasingly deployed globally, improving support for multilingual text remains a crucial step toward inclusive and culturally competent AI systems.", + "bbox": [ + 125, + 474, + 869, + 628 + ], + "page_idx": 84 + }, + { + "type": "text", + "text": "4 Conclusion", + "text_level": 1, + "bbox": [ + 125, + 650, + 256, + 666 + ], + "page_idx": 84 + }, + { + "type": "text", + "text": "In conclusion, this work presents a comprehensive study on the development of unified vision-language generative models, with a focus on evaluating GPT-4o across a wide range of image generation tasks. Our analysis shows that GPT-4o demonstrates strong capabilities in aligning vision and language, achieving competitive results across text-to-image, image-to-image, image-to-3D, and image-to-X tasks. However, limitations remain in inconsistent generation, hallucination, and data bias in underrepresented cultural elements and non-Latin scripts, highlighting current trade-offs in model design and training data coverage. We also emphasize that architecture alone does not determine success; training data, model scale, and optimization strategies are equally critical components of progress. We hope future work will provide deeper empirical insights into such proprietary systems and clarify their position within the broader landscape of unified generative modeling.", + "bbox": [ + 125, + 681, + 867, + 808 + ], + "page_idx": 84 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 127, + 825, + 223, + 842 + ], + "page_idx": 84 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Hao Ai, Zidong Cao, Haonan Lu, Chen Chen, Jian Ma, Pengyuan Zhou, Tae-Kyun Kim, Pan Hui, and Lin Wang. Dream360: Diverse and immersive outdoor virtual scene creation via transformer-based 360 image outpainting. IEEE transactions on visualization and computer graphics, 2024. 34, 42", + "[2] Ideogram AI. Ideogram. https://ideogram.ai/, 2024. 10, 11, 12" + ], + "bbox": [ + 140, + 853, + 867, + 911 + ], + "page_idx": 84 + }, + { + "type": "page_number", + "text": "85", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 84 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[3] Youngmin Baek, Bado Lee, Dongyoon Han, Sangdoo Yun, and Hwalsuk Lee. Character region awareness for text detection. In CVPR, 2019. 78, 79", + "[4] Jinbin Bai, Wei Chow, Ling Yang, Xiangtai Li, Juncheng Li, Hanwang Zhang, and Shuicheng Yan. Humanedit: A high-quality human-rewarded dataset for instruction-based image editing. arXiv preprint arXiv:2412.04280, 2024. 21", + "[5] Jinbin Bai, Zhen Dong, Aosong Feng, Xiao Zhang, Tian Ye, Kaicheng Zhou, and Mike Zheng Shou. Integrating view conditions for image synthesis. arXiv preprint arXiv:2310.16002, 2023. 
21", + "[6] Jinbin Bai, Tian Ye, Wei Chow, Enxin Song, Qing-Guo Chen, Xiangtai Li, Zhen Dong, Lei Zhu, and Shuicheng Yan. Meissonic: Revitalizing masked generative transformers for efficient high-resolution text-to-image synthesis. arXiv preprint arXiv:2410.08261, 2024. 5, 85", + "[7] Shane Barratt and Rishi Sharma. A note on the inception score. arXiv preprint arXiv:1801.01973, 2018. 1", + "[8] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2023.5", + "[9] Manuel Brack, Felix Friedrich, Katharina Kornmeier, Linoy Tsaban, Patrick Schramowski, Kristian Kersting, and Apolinário Passos. *Ledits++: Limitless image editing using text-to-image models.* 2023. 21, 25", + "[10] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. arXiv preprint arXiv:2211.09800, 2022. 21", + "[11] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11315-11325, 2022. 85", + "[12] Haoyu Chen, Xiaojie Xu, Wenbo Li, Jingjing Ren, Tian Ye, Songhua Liu, Ying-Cong Chen, Lei Zhu, and Xinchao Wang. Posta: A go-to framework for customized artistic poster generation. arXiv preprint arXiv:2503.14908, 2025. 10, 12", + "[13] Liang Chen, Shuai Bai, Wenhao Chai, Weichu Xie, Haozhe Zhao, Leon Vinci, Junyang Lin, and Baobao Chang. Multimodal representation alignment for image generation: Text-image interleaved control is easier than you think. arXiv preprint arXiv:2502.20172, 2025. 1", + "[14] Liang-Chieh Chen, George Papandreou, Florian Schroff, and Hartwig Adam. Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587, 2017. 62, 64", + "[15] Sixiang Chen, Tian Ye, Jinbin Bai, Erkang Chen, Jun Shi, and Lei Zhu. Sparse sampling transformer with uncertainty-driven ranking for unified removal of raindrops and rain streaks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13106-13117, 2023. 34", + "[16] Sixiang Chen, Tian Ye, Yun Liu, and Erkang Chen. Snowformer: Context interaction transformer with scale-awareness for single image desnowing. arXiv preprint arXiv:2208.09703, 2022. 34", + "[17] Sixiang Chen, Tian Ye, Kai Zhang, Zhaohu Xing, Yunlong Lin, and Lei Zhu. Teaching tailored to talent: Adverse weather restoration via prompt pool and depth-anything constraint. In European Conference on Computer Vision, pages 95–115. Springer, 2024. 34", + "[18] Tianqi Chen, Yongfei Liu, Zhendong Wang, Jianbo Yuan, Quanzeng You, Hongxia Yang, and Mingyuan Zhou. Improving in-context learning in diffusion models with visual context-modulated prompts. arXiv preprint arXiv:2312.01408, 2023. 56", + "[19] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1", + "[20] Marcos V. Conde, Gregor Geigle, and Radu Timofte. Instructir: High-quality image restoration following human instructions. In ECCV, 2024. 34, 35, 36, 37, 38, 39, 40", + "[21] Runmin Cong, Yuchen Guan, Jinpeng Chen, Wei Zhang, Yao Zhao, and Sam Kwong. Sddnet: Style-guided dual-layer disentanglement network for shadow detection. 
In ACM MM, 2023. 69, 72", + "[22] Ciprian Corneanu, Raghudeep Gadde, and Aleix M Martinez. Latentpaint: Image inpainting in latent space with diffusion models. In WACV, 2024. 34, 41", + "[23] Yingying Deng, Fan Tang, Weiming Dong, Chongyang Ma, Xingjia Pan, Lei Wang, and Changsheng Xu. Stytr2: Image style transfer with transformers. In CVPR, 2022. 18", + "[24] Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamlmm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499, 2023. 1", + "[25] Wei Dong, Han Zhou, Yuqiong Tian, Jingke Sun, Xiaohong Liu, Guangtao Zhai, and Jun Chen. Shadowrefiner: Towards mask-free shadow removal via fast fourier transformer. arXiv preprint arXiv:2406.02559. 44", + "[26] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 1" + ], + "bbox": [ + 135, + 90, + 870, + 911 + ], + "page_idx": 85 + }, + { + "type": "page_number", + "text": "86", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 85 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first international conference on machine learning*, 2024. 10, 11, 47, 51", + "[28] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 12873-12883, 2021. 1", + "[29] Aosong Feng, Weikang Qiu, Jinbin Bai, Kaicheng Zhou, Zhen Dong, Xiao Zhang, Rex Ying, and Leandros Tassiulas. An item is worth a prompt: Versatile image editing with disentangled control. arXiv preprint arXiv:2403.04880, 2024. 21", + "[30] Tsu-Jui Fu, Wenze Hu, Xianzhi Du, William Yang Wang, Yinfei Yang, and Zhe Gan. Guiding instruction-based image editing via multimodal large language models. ICLR, 2024. 21, 22, 23, 24", + "[31] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. ICLR, 2023. 28", + "[32] Jun Gao, Tianchang Shen, Zian Wang, Wenzheng Chen, Kangxue Yin, Daiqing Li, Or Litany, Zan Gojcic, and Sanja Fidler. Get3d: A generative model of high quality 3d textured shapes learned from images. NeurIPS, 2022. 58", + "[33] Leon A Gatys, Alexander S Ecker, and Matthias Bethge. Image style transfer using convolutional neural networks. CVPR, 2016. 18", + "[34] Yuying Ge, Sijie Zhao, Jinguo Zhu, Yixiao Ge, Kun Yi, Lin Song, Chen Li, Xiaohan Ding, and Ying Shan. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024.1", + "[35] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 63(11):139–144, 2020. 1", + "[36] Yuchao Gu, Xintao Wang, Jay Zhangjie Wu, Yujun Shi, Yunpeng Chen, Zihan Fan, Wuyou Xiao, Rui Zhao, Shuning Chang, Weijia Wu, et al. 
Mix-of-show: Decentralized low-rank adaptation for multi-concept customization of diffusion models. In NeurIPS, 2024. 28", + "[37] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. NeurIPS, 2017. 1", + "[38] Qibin Hou, Yuying Ge, Jing Zhang, Yuchao Dai, and Ming-Ming Cheng. Storydiffusion: Consistent self-attention for long-range image and video generation. In Advances in Neural Information Processing Systems (NeurIPS), 2024. 31, 32", + "[39] Qiming Hu, Hainuo Wang, and Xiaojie Guo. Single image reflection separation via dual-stream interactive transformers. Advances in Neural Information Processing Systems, 37:55228-55248, 2024. 45", + "[40] Jiancheng Huang, Yi Huang, Jianzhuang Liu, Donghao Zhou, Yifan Liu, and Shifeng Chen. Dual-schedule inversion: Training-and tuning-free inversion for real image editing. arXiv preprint arXiv:2412.11152, 2024. 21", + "[41] Kaiyi Huang, Chengqi Duan, Kaiyue Sun, Enze Xie, Zhenguo Li, and Xihui Liu. T2i-compbench++: An enhanced and comprehensive benchmark for compositional text-to-image generation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2025. 5", + "[42] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Yupeng Shi, Huanzhang Dou, Chen Liang, Yutong Feng, Yu Liu, and Jingren Zhou. In-context lora for diffusion transformers. arXiv preprint arXiv:2410.23775, 2024. 56", + "[43] Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In ICCV, 2017. 18", + "[44] Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, and Furu Wei. Layoutlmv3: Pre-training for document ai with unified text and image masking. In ACM MM, 2022. 77", + "[45] Zixuan Huang, Stefan Stojanov, Anh Thai, Varun Jampani, and James M Rehg. Planes vs. chairs: Category-guided 3d shape learning without any 3d cues. In ECCV, 2022. 58", + "[46] Jiaxiu Jiang, Yabo Zhang, Kailai Feng, Xiaohe Wu, Wenbo Li, Renjing Pei, Fan Li, and Wangmeng Zuo. Mc2: Multi-concept guidance for customized multi-concept generation. arXiv preprint arXiv:2404.05268, 2024. 28", + "[47] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In ECCV, 2016. 18", + "[48] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 76", + "[49] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In ICCV, 2023. 56", + "[50] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In CVPR, 2023. 28", + "[51] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024. 2, 5, 8, 9, 10, 11, 47, 48, 49" + ], + "bbox": [ + 135, + 90, + 870, + 912 + ], + "page_idx": 86 + }, + { + "type": "page_number", + "text": "87", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 86 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] Bolin Lai, Felix Juefei-Xu, Miao Liu, Xiaoliang Dai, Nikhil Mehta, Chenguang Zhu, Zeyi Huang, James M Rehg, Sang-min Lee, Ning Zhang, et al. 
Unleashing in-context learning of autoregressive models for few-shot image manipulation. arXiv preprint arXiv:2412.01027, 2024. 56", + "[53] Jiachen Li, Jitesh Jain, and Humphrey Shi. Matting anything. arXiv: 2306.05399, 2023. 66", + "[54] Jiachen Li, Jitesh Jain, and Humphrey Shi. Matting anything. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1775–1785, 2024. 68", + "[55] Junyi Li, Zhilu Zhang, Xiaoyu Liu, Chaoyu Feng, Xiaotao Wang, Lei Lei, and Wangmeng Zuo. Spatially adaptive self-supervised learning for real-world image denoising. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2023. 34", + "[56] Yachuan Li, Xavier Soria Poma, Yun Bai, Qian Xiao, Chaozhi Yang, Guanlin Li, and Zongmin Li. Edmb: Edge detector with mamba. arXiv preprint arXiv:2501.04846, 2025. 66, 67", + "[57] Yijun Li, Chen Fang, Jimei Yang, Zhaowen Wang, Xin Lu, and Ming-Hsuan Yang. Universal style transfer via feature transforms. In NIPS, 2017. 18", + "[58] Zijie Li, Henry Li, Yichun Shi, Amir Barati Farimani, Yuval Kluger, Linjie Yang, and Peng Wang. Dual diffusion for unified image generation and understanding. arXiv preprint arXiv:2501.00289, 2024. 2", + "[59] Zhexin Liang, Zhaochen Li, Shangchen Zhou, Chongyi Li, and Chen Change Loy. Control color: Multimodal diffusion-based interactive image colorization. arXiv preprint arXiv:2402.10855, 2024. 34, 43", + "[60] Xin Lin, Chao Ren, Kelvin CK Chan, Lu Qi, Jinshan Pan, and Ming-Hsuan Yang. Multi-task image restoration guided by robust dino features. arXiv preprint arXiv:2312.01677, 2023. 34", + "[61] Xin Lin, Chao Ren, and Xiao Liu. Unsupervised image denoising in real-world scenarios via self-collaboration parallel generative adversarial branches. In ICCV, 2023. 34", + "[62] Xin Lin, Jingtong Yue, Sixian Ding, Chao Ren, Lu Qi, and Ming-Hsuan Yang. Dual degradation representation for joint deraining and low-light enhancement in the dark. IEEE Transactions on Circuits and Systems for Video Technology, 2024. 34", + "[63] Xin Lin, Yuyan Zhou, Jingtong Yue, Chao Ren, Kelvin CK Chan, Lu Qi, and Ming-Hsuan Yang. Re-boosting self-collaboration parallel prompt gan for unsupervised image restoration. arXiv preprint arXiv:2408.09241, 2024. 34", + "[64] Bingchen Liu, Ehsan Akhgari, Alexander Visheratin, Aleks Kamko, Linmiao Xu, Shivam Shrirao, Chase Lambert, Joao Souza, Suhail Doshi, and Daiqing Li. Playground v3: Improving text-to-image alignment with deep-fusion large language models. arXiv preprint arXiv:2409.10695, 2024. 10, 12, 14", + "[65] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining, 2024. 1", + "[66] Haipeng Liu, Yang Wang, Biao Qian, Meng Wang, and Yong Rui. Structure matters: Tackling the semantic discrepancy in diffusion models for image inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 34, 42", + "[67] Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024. 1", + "[68] Minghua Liu, Chao Xu, Haian Jin, Linghao Chen, Mukund Varma T, Zexiang Xu, and Hao Su. One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization. Advances in Neural Information Processing Systems, 2023. 
58", + "[69] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proceedings of the IEEE/CVF international conference on computer vision, 2023. 58", + "[70] Xiaoxiao Long, Yuan-Chen Guo, Cheng Lin, Yuan Liu, Zhiyang Dou, Lingjie Liu, Yuexin Ma, Song-Hai Zhang, Marc Habermann, Christian Theobalt, et al. Wonder3d: Single image to 3d using cross-domain diffusion. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2024. 58", + "[71] Aaron Lou, Chenlin Meng, and Stefano Ermon. Discrete diffusion modeling by estimating the ratios of the data distribution. arXiv preprint arXiv:2310.16834, 2023. 2", + "[72] Yiyang Ma, Xingchao Liu, Xiaokang Chen, Wen Liu, Chengyue Wu, Zhiyu Wu, Zizheng Pan, Zhenda Xie, Haowei Zhang, Liang Zhao, et al. Janusflow: Harmonizing autoregression and rectified flow for unified multimodal understanding and generation. arXiv preprint arXiv:2411.07975, 2024. 2", + "[73] Chenlin Meng, Kristy Choi, Jiaming Song, and Stefano Ermon. Concrete score matching: Generalized score matching for discrete data. Advances in Neural Information Processing Systems, 35:34532-34545, 2022. 2", + "[74] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019. 58", + "[75] Midjourney. Midjourney. https://www.midjourney.com, 2024. 2, 6, 7, 18, 19, 20, 59, 60, 61" + ], + "bbox": [ + 135, + 90, + 870, + 912 + ], + "page_idx": 87 + }, + { + "type": "page_number", + "text": "88", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 87 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[76] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 2021. 58", + "[77] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2020. 58", + "[78] OpenAI. Addendum to gpt-4o system card: 4o image generation, 2025. Accessed: 2025-04-02. 2", + "[79] Junyi Pan, Xiaoguang Han, Weikai Chen, Jiapeng Tang, and Kui Jia. Deep mesh reconstruction from single rgb images via topology modification networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019. 58", + "[80] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3d hands, face, and body from a single image. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019. 58", + "[81] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 5", + "[82] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: Improving latent diffusion models for high-resolution image synthesis. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 
1, 47, 50", + "[83] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3d object generation using both 2d and 3d diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 58", + "[84] Chu-Jie Qin, Rui-Qi Wu, Zikun Liu, Xin Lin, Chun-Le Guo, Hyun Hee Park, and Chongyi Li. Restore anything with masks: Leveraging mask image modeling for blind all-in-one image restoration. In ECCV, 2024. 34", + "[85] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 5", + "[86] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. SAM 2: Segment anything in images and videos. *ICLR*, 2025. 80, 81, 82, 83, 84", + "[87] Jeremy Reizenstein, Roman Shapovalov, Philipp Henzler, Luca Sbordone, Patrick Labatut, and David Novotny. Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In ICCV, 2021. 58", + "[88] Bin Ren, Yawei Li, Nancy Mehta, and Radu Timofte. The ninth nitire 2024 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 34", + "[89] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 10684-10695, 2022. 1", + "[90] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, June 2022. 47, 52", + "[91] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023. 28", + "[92] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 2022. 5", + "[93] Subham Sahoo, Marianne Arriola, Yair Schiff, Aaron Gokaslan, Edgar Marroquin, Justin Chiu, Alexander Rush, and Volodymyr Kuleshov. Simple and effective masked diffusion language models. Advances in Neural Information Processing Systems, 37:130136-130184, 2024. 2", + "[94] Qingyu Shi, Lu Qi, Jianzong Wu, Jinbin Bai, Jingbo Wang, Yunhai Tong, Xiangtai Li, and Ming-Husan Yang. Relation- booth: Towards relation-aware customized object generation. arXiv preprint arXiv:2410.23280, 2024. 28", + "[95] Haoze Sun, Wenbo Li, Jianzhuang Liu, Haoyu Chen, Renjing Pei, Xueyi Zou, Youliang Yan, and Yujiu Yang. Coser: Bridging image and language for cognitive super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 25868-25878, 2024. 34", + "[96] Quan Sun, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, Yueze Wang, Hongcheng Gao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative pretraining in multimodality. 
arXiv preprint arXiv:2307.05222, 2023.1", + "[97] Alexander Swerdlow, Mihir Prabhudesai, Siddharth Gandhi, Deepak Pathak, and Katerina Fragkiadaki. Unified multimodal discrete diffusion. arXiv preprint arXiv:2503.20853, 2025. 2", + "[98] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 1" + ], + "bbox": [ + 135, + 90, + 870, + 912 + ], + "page_idx": 88 + }, + { + "type": "page_number", + "text": "89", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 88 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[99] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 2, 3, 5, 6, 7, 8, 9, 10, 12, 14, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 59, 60, 61, 63, 64, 65, 67, 68, 70, 71, 72, 73, 75, 76, 77, 78, 79, 81, 82, 83, 84", + "[100] Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024. 1", + "[101] Chunwei Wang, Guansong Lu, Junwei Yang, Runhui Huang, Jianhua Han, Lu Hou, Wei Zhang, and Hang Xu. Illumine: Illuminating your llms to see, draw, and self-enhance. arXiv preprint arXiv:2412.06673, 2024. 1", + "[102] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In Proceedings of the European conference on computer vision (ECCV), 2018. 58", + "[103] Xierui Wang, Siming Fu, Qihan Huang, Wanggui He, and Hao Jiang. Ms-diffusion: Multi-subject zero-shot image personalization with layout guidance. arXiv preprint arXiv:2406.07209, 2024. 28, 30", + "[104] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 1", + "[105] Zhendong Wang, Yifan Jiang, Yadong Lu, Pengcheng He, Weizhu Chen, Zhangyang Wang, Mingyuan Zhou, et al. In-context learning unlocked for diffusion models. NeurIPS, 2023. 56", + "[106] Zhenyu Wang, Aoxue Li, Zhenguo Li, and Xihui Liu. Genartist: Multimodal llm as an agent for unified image generation and editing. NeurIPS, 2024. 5", + "[107] Alex Warren, Ke Xu, Jiaying Lin, Gary KL Tam, and Rynson WH Lau. Effective video mirror detection with inconsistent motion cues. In CVPR, 2024. 69, 71", + "[108] Jianzong Wu, Chao Tang, Jingbo Wang, Yanhong Zeng, Xiangtai Li, and Yunhai Tong. Diffensei: Bridging multi-modal lms and diffusion models for customized manga generation. CVPR, 2025. 31, 33", + "[109] Size Wu, Wenwei Zhang, Lumin Xu, Sheng Jin, Zhonghua Wu, Qingyi Tao, Wentao Liu, Wei Li, and Chen Change Loy. Harmonizing visual representations for unified multimodal understanding and generation. arXiv preprint arXiv:2503.21979, 2025. 1", + "[110] Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. arXiv preprint arXiv:2409.04429, 2024. 
1", + "[111] Yifan Xia, Yuying Ge, Jing Zhang, Yuchao Dai, and Ming-Ming Cheng. Seed-story: Multimodal long story generation with large language model. arXiv preprint arXiv:2407.08683, 2024. 31, 32", + "[112] Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and Zheng Liu. Omnigen: Unified image generation. arXiv preprint arXiv:2409.11340, 2024. 2", + "[113] Jiale Xu, Weihao Cheng, Yiming Gao, Xintao Wang, Shenghua Gao, and Ying Shan. Instantmesh: Efficient 3d mesh generation from a single image with sparse-view large reconstruction models. arXiv preprint arXiv:2404.07191, 2024.58", + "[114] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10371-10381, 2024. 74, 75", + "[115] Ling Yang, Zhaochen Yu, Chenlin Meng, Minkai Xu, Stefano Ermon, and Bin Cui. Mastering text-to-image diffusion: Recaptioning, planning, and generating with multimodal llms. In ICML, 2024. 5", + "[116] Hang Yu, Ruilin Li, Shaorong Xie, and Jiayan Qiu. Shadow-eligible image outpainting. In CVPR, 2024. 34, 42", + "[117] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv, 2025. 62, 63", + "[118] Yu Yuan, Xijun Wang, Yichen Sheng, Prateek Chennuri, Xingguang Zhang, and Stanley Chan. Generative photography: Scene-consistent camera control for realistic text-to-image synthesis. arXiv preprint arXiv:2412.02168, 2024. 53, 54, 55", + "[119] Cheng Zhang, Qianyi Wu, Camilo Cruz Gambardella, Xiaoshui Huang, Dinh Phung, Wanli Ouyang, and Jianfei Cai. Taming stable diffusion for text to $360^{\\circ}$ panorama image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 17", + "[120] Kai Zhang, Lingbo Mo, Wenhu Chen, Huan Sun, and Yu Su. Magicbrush: A manually annotated dataset for instruction-guided image editing. In NeurIPS, 2023. 21, 25, 26, 27", + "[121] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models, 2023. 47" + ], + "bbox": [ + 129, + 90, + 870, + 910 + ], + "page_idx": 89 + }, + { + "type": "page_number", + "text": "90", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 89 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[122] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Scaling in-the-wild training for diffusion-based illumination harmonization and editing by imposing consistent light transport. In ICLR, 2025. 34, 46", + "[123] Wenwei Zhang, Jiangmiao Pang, Kai Chen, and Chen Change Loy. K-net: Towards unified image segmentation. Advances in Neural Information Processing Systems, 34:10326-10338, 2021. 62, 65", + "[124] Xinchen Zhang, Ling Yang, Guohao Li, Yaqi Cai, Jiake Xie, Yong Tang, Yujiu Yang, Mengdi Wang, and Bin Cui. Itercomp: Iterative composition-aware feedback learning from model gallery for text-to-image generation. arXiv preprint arXiv:2410.07171, 2024.5", + "[125] Yuxuan Zhang, Yiren Song, Jiaming Liu, Rui Wang, Jinpeng Yu, Hao Tang, Huaxia Li, Xu Tang, Yao Hu, Han Pan, et al. Ssr-encoder: Encoding selective subject representation for subject-driven generation. In CVPR, 2024. 
28", + "[126] Chuyang Zhao, Yuxing Song, Wenhao Wang, Haocheng Feng, Errui Ding, Yifan Sun, Xinyan Xiao, and Jingdong Wang. Monofrformer: One transformer for both diffusion and autoregression. arXiv preprint arXiv:2409.16280, 2024. 2", + "[127] Peng Zheng, Dehong Gao, Deng-Ping Fan, Li Liu, Jorma Laaksonen, Wanli Ouyang, and Nicu Sebe. Bilateral reference for high-resolution dichotomous image segmentation. CCAI, 2024. 69, 70, 73", + "[128] Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024. 2", + "[129] Donghao Zhou, Jiancheng Huang, Jinbin Bai, Jiaze Wang, Hao Chen, Guangyong Chen, Xiaowei Hu, and Pheng-Ann Heng. MagicTailor: Component-controllable personalization in text-to-image diffusion models. arXiv preprint arXiv:2410.13370, 2024. 28", + "[130] Zhiyu Zhu, Yingcong Chen, Zhenyu Xie, and Jingyi Yu. Disenvisioner: Disentangled and enriched visual prompt for customized image generation. arXiv preprint arXiv:2410.02067, 2024. 28, 29", + "[131] Silvia Zuffi, Angjoo Kanazawa, and Michael J Black. Lions and tigers and bears: Capturing non-rigid, 3d, articulated shape from images. In CVPR, 2018. 58" + ], + "bbox": [ + 125, + 90, + 872, + 428 + ], + "page_idx": 90 + }, + { + "type": "page_number", + "text": "91", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 90 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_model.json b/data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c3c78ed4b25f7d836f22bdb052cc3b6f8ff0d01b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_model.json @@ -0,0 +1,23823 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.706 + ], + "angle": 270, + "content": "arXiv:2504.05979v2 [cs.CV] 10 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.123, + 0.87, + 0.149 + ], + "angle": 0, + "content": "An Empirical Study of GPT-4o Image Generation Capabilities" + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.199, + 0.871, + 0.247 + ], + "angle": 0, + "content": "Sixiang Chen\\(^{1*}\\), Jinbin Bai\\(^{2*}\\), Zhuoran Zhao\\(^{1*}\\), Tian Ye\\(^{1*}\\), Qingyu Shi\\(^{3}\\), Donghao Zhou\\(^{4}\\), Wenhao Chai\\(^{5}\\), Xin Lin\\(^{6}\\), Jianzong Wu\\(^{3}\\), Chao Tang\\(^{3}\\), Shilin Xu\\(^{3}\\), Tao Zhang\\(^{6}\\), Haobo Yuan\\(^{6}\\), Yikang Zhou\\(^{6}\\), Wei Chow\\(^{2}\\), Linfeng Li\\(^{2}\\), Xiangtai Li\\(^{3\\dagger}\\), Lei Zhu\\(^{1,7\\dagger}\\), Lu Qi\\(^{6\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.155, + 0.247, + 0.855, + 0.291 + ], + "angle": 0, + "content": "\\(^{1}\\)The Hong Kong University of Science and Technology (GZ) \\(^{2}\\)National University of Singapore \\(^{3}\\)Peking University \\(^{4}\\)The Chinese University of Hong Kong \\(^{5}\\)University of Washington \\(^{6}\\)Wuhan University \\(^{7}\\)The Hong Kong University of Science and Technology" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.326, + 0.538, + 0.342 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.358, + 0.816, + 0.553 + ], + "angle": 0, + "content": "The landscape of image generation has rapidly evolved, from early GAN-based approaches to diffusion 
models and, most recently, to unified generative architectures that seek to bridge understanding and generation tasks. Recent advances, especially GPT-4o, have demonstrated the feasibility of high-fidelity multimodal generation, but their architectural design remains mysterious and unpublished. This prompts the question of whether image and text generation have already been successfully integrated into a unified framework within these methods. In this work, we conduct an empirical study of GPT-4o's image generation capabilities, benchmarking it against leading open-source and commercial models. Our evaluation covers four main categories, including text-to-image, image-to-image, image-to-3D, and image-to-X generation, with more than 20 tasks. Our analysis highlights the strengths and limitations of GPT-4o under various settings, and situates it within the broader evolution of generative modeling. Through this investigation, we identify promising directions for future unified generative models, emphasizing the role of architectural design and data scaling. For a high-definition version of the PDF, please refer to the link on GitHub: https://github.com/Ephemeral182/Empirical-Study-of-GPT-4o-Image-Gen." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.579, + 0.269, + 0.595 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.611, + 0.871, + 0.697 + ], + "angle": 0, + "content": "Over the past decade, image generation has undergone a remarkable evolution—from the early successes of GANs [35] to the dominance of diffusion models [89, 82, 26], which have significantly advanced image fidelity and diversity [37, 7]. In parallel, Large Language Models (LLMs) have achieved exceptional performance across diverse natural language tasks by scaling autoregressive next-token prediction, demonstrating the power of unified modeling principles. These advances naturally raise a compelling question: can such principles be extended to image generation?" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.7, + 0.87, + 0.812 + ], + "angle": 0, + "content": "However, fundamental differences between autoregressive and diffusion-based paradigms present non-trivial challenges. Autoregressive models excel in sequential text generation, while diffusion models have become the de facto standard for high-quality image synthesis. Bridging these modalities within a unified framework remains an open challenge. Several works [96, 101, 100, 34, 24, 13] attempt to bridge this gap via multimodal connectors or instruction tuning, with LLMs serving as planning modules that produce intermediate representations for image generation. While effective to some extent, these paradigms often exhibit limited interaction between text and image modalities, and struggle with content consistency—particularly in image-to-image generation and complex instruction-based synthesis." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.818, + 0.871, + 0.876 + ], + "angle": 0, + "content": "To address these limitations, recent research explores unified generation models that integrate understanding and generation within a single architecture, following three main technical paradigms. The first line of work represents both language and vision as discrete token sequences [67, 98, 110, 104, 19, 65, 109], leveraging VQGAN [28] or similar compressors to tokenize images for compatibility with autoregressive models.
A second direction integrates" + }, + { + "type": "page_footnote", + "bbox": [ + 0.15, + 0.885, + 0.707, + 0.9 + ], + "angle": 0, + "content": "*Equal contributions. ☑: schen691@connect.hkust-gz.edu.cn † Corresponding authors." + }, + { + "type": "footer", + "bbox": [ + 0.128, + 0.923, + 0.292, + 0.937 + ], + "angle": 0, + "content": "Preprint. Work in progress." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.127, + 0.092, + 0.872, + 0.204 + ], + "angle": 0, + "content": "large language models directly into the diffusion process [128, 126, 112, 72], employing them as denoising backbones for image generation and as unified sequence models for text. While promising, these approaches typically rely on intermediate compression modules such as VAEs or VQVAEs, which may limit visual fidelity or increase architectural complexity. A third and increasingly prominent paradigm investigates discrete diffusion frameworks that natively support both image and text generation within a unified modeling space [71, 73, 93]. Building on this insight, recent works [58, 97] propose fully end-to-end diffusion architectures based on shared Transformer backbones, demonstrating competitive performance and seamless modality integration comparable to similarly sized LLMs." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.209, + 0.871, + 0.239 + ], + "angle": 0, + "content": "Despite these promising directions, such systems still lag behind the sophistication and generalization capabilities of proprietary models like Flux [51] and Midjourney [75], although those models may in turn lack reasoning capabilities." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.243, + 0.872, + 0.327 + ], + "angle": 0, + "content": "The recent release of GPT-4o [78] marks a significant milestone in multimodal generative modeling. As a native multimodal architecture, GPT-4o demonstrates strong capabilities in generating high-fidelity, photorealistic images while seamlessly unifying vision and language generation—reportedly in an autoregressive fashion. However, its closed-source nature—particularly the lack of disclosure about its architecture, training regimen, and inference mechanisms—poses substantial challenges for scientific scrutiny. This motivates a careful empirical assessment of its capabilities relative to open-source state-of-the-art models." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.333, + 0.872, + 0.404 + ], + "angle": 0, + "content": "Although the visual performance of GPT-4o and Gemini is widely recognized, much of their success likely stems from unprecedented scale in training data, model parameters, and compute resources. Prior studies of both diffusion models and connector-based models suggest that scaling is a key enabler of generative quality—potentially more so than architectural novelty alone. These trends point to a promising trajectory for unified generative models: with sufficient scale, they may rival or even surpass today's best proprietary systems." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.409, + 0.872, + 0.466 + ], + "angle": 0, + "content": "In this study, we conduct a comprehensive evaluation of GPT-4o's image generation performance, benchmarking its outputs against leading systems including Gemini 2.0 Flash Experimental [99] and other state-of-the-art models.
Building upon our comparative evaluation across text-to-image, image-to-image, image-to-3D, and image-to-X generation tasks, GPT-4o demonstrates several distinctive strengths:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.477, + 0.87, + 0.533 + ], + "angle": 0, + "content": "- Exceptional Text Rendering Capability. GPT-4o demonstrates exceptional capability in rendering textual elements within images, maintaining correct spelling, alignment, and formatting even in document-style generation tasks. This level of text fluency is rarely seen in prior models and is crucial for practical applications such as chart generation, document layout synthesis, and instruction-rich visual storytelling." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.539, + 0.87, + 0.594 + ], + "angle": 0, + "content": "- Compositional Generalization and Prompt Following. GPT-4o displays impressive compositional abilities, accurately assembling complex scene elements, styles, or attributes described in prompts. This strong prompt adherence enables it to handle fine-grained multi-attribute conditions in generation tasks with minimal loss of semantic detail." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.601, + 0.87, + 0.657 + ], + "angle": 0, + "content": "- Spatial Reasoning and Multi-View Consistency. In generation tasks involving spatial manipulation, such as 3D view synthesis, camera control, and depth-conditioned rendering, GPT-4o maintains geometric consistency and viewpoint realism. This indicates an inherent capacity for spatial reasoning and structural awareness, even without explicit 3D modeling modules." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.662, + 0.87, + 0.745 + ], + "angle": 0, + "content": "- Comprehensive Image Transformation Capability. GPT-4o shows strong generalization across a wide spectrum of image-to-image tasks, ranging from low-level image restoration to high-level perceptual understanding. Without task-specific tuning, it handles most of these diverse transformations, such as denoising, deblurring, relighting, segmentation, and depth estimation. This suggests the model has learned robust visual priors and spatial semantics, enabling it to perform correction and abstract structural prediction under a unified framework." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.477, + 0.87, + 0.745 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.757, + 0.869, + 0.787 + ], + "angle": 0, + "content": "However, limitations remain in inconsistent generation, hallucination, and data bias in underrepresented cultural elements and non-Latin scripts, highlighting current trade-offs in model design and training data coverage." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.791, + 0.872, + 0.862 + ], + "angle": 0, + "content": "While we do not analyze the internal architecture or implementation details of GPT-4o in this paper*, we believe it plays an important role toward unified multimodal generation. We also emphasize that model architecture is only one part of this progress—training data, model scale, and optimization strategies are equally important. We hope future work will provide more empirical evidence to better understand such proprietary systems and their position within this evolving research landscape."
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.127, + 0.873, + 0.872, + 0.913 + ], + "angle": 0, + "content": "*There is currently no definitive evidence regarding the specific implementation details or architectural design of GPT-4o's image generation capabilities. To ensure the credibility and accuracy of our analysis, we will refrain from making speculative claims in the current version." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.09, + 0.254, + 0.106 + ], + "angle": 0, + "content": "2 Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.121, + 0.871, + 0.163 + ], + "angle": 0, + "content": "As GPT-4o's image generation capability has only recently been released and no API is available, we conduct only qualitative comparisons between GPT-4o, Gemini 2.0 Flash [99], and other state-of-the-art models in their respective domains." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.17, + 0.872, + 0.227 + ], + "angle": 0, + "content": "To systematically compare these models' performance across diverse image generation tasks, including text-to-image generation, image-to-image generation, text/image-to-3D generation, and various image-to-X generation, we conduct a detailed case study. This qualitative analysis provides insight into GPT-4o's strengths and limitations in various tasks, as shown in Table 1." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.234, + 0.87, + 0.266 + ], + "angle": 0, + "content": "Low Visual Quality: The image synthesis model fails to generate fine-grained object details or produces blurry outputs. Typical cases include distorted human bodies or unrealistic hand shapes." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.273, + 0.869, + 0.303 + ], + "angle": 0, + "content": "Inconsistent Generation: The image synthesis model produces outputs or image details that are inconsistent with the input image." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.312, + 0.868, + 0.343 + ], + "angle": 0, + "content": "Lack of Knowledge: The image synthesis model lacks domain-specific knowledge, such as particular artistic styles, and thus generates visually plausible but incorrect results." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.351, + 0.869, + 0.382 + ], + "angle": 0, + "content": "Failure to Follow Instructions: The image synthesis model misinterprets the input prompt and produces inconsistent results. For example, it may fail to capture specified numbers, colors, or object arrangements." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.209, + 0.116, + 0.787, + 0.132 + ], + "angle": 0, + "content": "Table 1: GPT-4o vs. Baselines: Qualitative error analysis across image generation tasks." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.139, + 0.871, + 0.884 + ], + "angle": 0, + "content": "
| Case Figure | Meta-task | Sub-task | GPT-4o | Gemini-2.0-flash | Domain-SOTA |
| --- | --- | --- | --- | --- | --- |
| Figure 1 | | | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 2 | | Complex Text Following | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 3 | | | Success | Success | Success |
| Figure 4 | | | Success | Success | Success |
| Figure 5 | | | Success | Success | Success |
| Figure 6 | Text-to-Image | Text Rendering | Success | Low Visual Quality | Low Visual Quality |
| Figure 7 | | | Success | Low Visual Quality | Low Visual Quality |
| Figure 8 | | | Success | Low Visual Quality | Low Visual Quality |
| Figure 9 | | Document Generation | Success | Low Visual Quality | Low Visual Quality |
| Figure 10 | | | Success | Low Visual Quality | Low Visual Quality |
| Figure 11 | | Panorama | Lack of Knowledge | Success | Success |
| Figure 12 | | Style Transfer | Success | Lack of Knowledge | Lack of Knowledge |
| Figure 13 | | | Success | Lack of Knowledge | Lack of Knowledge |
| Figure 14 | | | Low Visual Quality | Success | Failure to Follow Instructions |
| Figure 15 | | Image Editing | Failure to Follow Instructions | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 16 | | | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 17 | | | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 18 | | | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 19 | | | Success | Inconsistent Generation | Failure to Follow Instructions |
| Figure 20 | | Single-Concept Customization | Success | Failure to Follow Instructions | Success |
| Figure 21 | | Multi-Concept Customization | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 22 | | Story Image Generation | Success | Failure to Follow Instructions | Success |
| Figure 23 | | | Success | Inconsistent Generation | Success |
| Figure 24 | | Low-Level Vision-Denoising | Low Visual Quality | Low Visual Quality | Success |
| Figure 25 | | Low-Level Vision-Deraining | Success | Inconsistent Generation | Success |
| Figure 26 | | Low-Level Vision-Dehazing | Success | Low Visual Quality | Success |
| Figure 27 | | Low-Level Vision-Low Light Enhancement | Low Visual Quality | Low Visual Quality | Success |
| Figure 28 | | Low-Level Vision-Deblurring | Success | Low Visual Quality | Success |
| Figure 29 | | Low-Level Vision-Super Resolution | Success | Low Visual Quality | Success |
| Figure 30 | | Low-Level Vision-Inpainting | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 31 | | Low-Level Vision-Outpainting | Inconsistent Generation | Success | Success |
| Figure 32 | | Low-Level Vision-Colorization | Success | Success | Success |
| Figure 33 | | Low-Level Vision-Shadow Removal | Success | Failure to Follow Instructions | Success |
| Figure 34 | | Low-Level Vision-Reflection Removal | Inconsistent Generation | Failure to Follow Instructions | Success |
| Figure 35 | | Low-Level Vision-Relighting | Success | Failure to Follow Instructions | Success |
| Figure 36 | | Spatial Control-Canny | Inconsistent Generation | Failure to Follow Instructions | Success |
| Figure 37 | | Spatial Control-Depth | Success | Failure to Follow Instructions | Success |
| Figure 38 | | Spatial Control-Sketch | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 39 | | Spatial Control-Pose | Success | Inconsistent Generation | Success |
| Figure 40 | | Spatial Control-Mask | Inconsistent Generation | Failure to Follow Instructions | Inconsistent Generation |
| Figure 41 | | Camera Control | Inconsistent Generation | Failure to Follow Instructions | Success |
| Figure 42 | | | Failure to Follow Instructions | Failure to Follow Instructions | Success |
| Figure 43 | | In-Context Visual Prompting | Failure to Follow Instructions | Failure to Follow Instructions | N/A |
| Figure 44 | | Image to 3D Modeling | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 45 | | UV Map to 3D Rendering | Success | Inconsistent Generation | Failure to Follow Instructions |
| Figure 46 | | Novel View Synthesis | Success | Success | Failure to Follow Instructions |
| Figure 47 | | Image Segmentation | Failure to Follow Instructions | Failure to Follow Instructions | Success |
| Figure 48 | | | Success | Failure to Follow Instructions | Success |
| Figure 49 | | | Success | Failure to Follow Instructions | Success |
| Figure 50 | | Edge Detection | Success | Success | Success |
| Figure 51 | | | Success | Failure to Follow Instructions | Success |
| Figure 52 | | | Success | Failure to Follow Instructions | Success |
| Figure 53 | | Salient Object | Success | Failure to Follow Instructions | Success |
| Figure 54 | | | Success | Success | Success |
| Figure 55 | | | Success | Success | Success |
| Figure 56 | | Depth Estimation | Success | Failure to Follow Instructions | Success |
| Figure 57 | | Normal Estimation | Success | Failure to Follow Instructions | Success |
| Figure 58 | | Layout Detection | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 59 | | Text Detection | Failure to Follow Instructions | Failure to Follow Instructions | Success |
| Figure 60 | | | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 61 | | | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 62 | | | Inconsistent Generation | Inconsistent Generation | Success |
Figure 63Inconsistent GenerationInconsistent GenerationSuccess
" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.312, + 0.106 + ], + "angle": 0, + "content": "2.1 Text-to-Image Tasks" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.117, + 0.429, + 0.133 + ], + "angle": 0, + "content": "2.1.1 Complex Text Following Capability" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.141, + 0.872, + 0.282 + ], + "angle": 0, + "content": "Recent progress in text-to-image generation has shown impressive abilities in generating diverse and realistic images based on text prompts. However, composing multiple objects with various attributes and relationships accurately into one scene remains a significant challenge for current text-to-image generative models [92, 85, 8, 81, 6]. In this section, we assess models' ability for compositional text-to-image generation from four perspectives following [41], which include attribute binding, numeracy, object relationship, and complex compositions. Attribute binding evaluates whether the model correctly assigns attributes, such as color, shape, and texture to the appropriate objects. Numeracy evaluates whether the number of generated objects matches the quantities specified in the prompt. Object relationships refer to both spatial (2D/3D) and non-spatial interactions among objects. Complex compositions evaluate the model's ability to handle multiple types of constraints simultaneously, especially given long or detailed prompts." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.285, + 0.872, + 0.398 + ], + "angle": 0, + "content": "As shown in Figure 1 row 1, GPT-4o outperforms both Gemini 2.0 Flash and Midjourney in numeracy tasks. While GPT-4o accurately represents a single plate, Gemini 2.0 and Midjourney represent two plates instead. In terms of understanding object relationships, GPT-4o is the only model that correctly infers the action \"walk towards\" from the ragdoll to the labrador. However, GPT-4o struggles with more complex terms like \"pentagonal pyramid\", failing to interpret it correctly (see Figure 1 row 4). This suggests that GPT-4o may have difficulty accurately interpreting objects with unusual geometries. When it comes to abstract prompts, GPT-4o also appears to lack imagination (see Figure 2 row 2), whereas Midjourney v6.1 demonstrates better creativity in this case, outperforming both GPT-4o and Gemini 2.0 Flash." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.402, + 0.872, + 0.502 + ], + "angle": 0, + "content": "For complex text-to-image generation, we evaluate GPT-4o's performance with Gemini 2.0 Flash [99] and FLUX.1-Pro [51], using the text prompts collected from [124, 106, 115]. As shown in Figure 3, both GPT-4o and FLUX excel at generating realistic and harmonious scenes align with the text prompts. However, we observe that GPT-4o shows limitations in generating culturally related elements. For example, the generated crown for the Chinese general is western-style rather than chinese-style (see Figure 4 row 2). Additionally, in large scene generation, GPT-4o struggles to maintain boundary continuity, whereas FLUX produces a more natural composition (see Figure 4 row 3)." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.506, + 0.871, + 0.551 + ], + "angle": 0, + "content": "Overall, we conclude that GPT-4o excels at text-to-image generation in terms of attribute binding, generative numeracy, object relationship, and complex compositions. 
However, it exhibits limitations in generating uncommon objects, culturally specific elements and in maintaining continuity when composing large scenes." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.192, + 0.101, + 0.393, + 0.117 + ], + "angle": 0, + "content": "Text-to-Image Generation" + }, + { + "type": "image", + "bbox": [ + 0.252, + 0.127, + 0.271, + 0.144 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.272, + 0.13, + 0.774, + 0.146 + ], + "angle": 0, + "content": "Evaluation: Visual content precisely following the text instruction." + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.147, + 0.364, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.395, + 0.147, + 0.588, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.147, + 0.824, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.176, + 0.298, + 0.61, + 0.312 + ], + "angle": 0, + "content": "Input Text: \"A yellow bowl, a blue mug and a pink plate on the table.\"" + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.314, + 0.367, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.395, + 0.313, + 0.59, + 0.465 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.311, + 0.822, + 0.465 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.179, + 0.465, + 0.49, + 0.478 + ], + "angle": 0, + "content": "Input Text: \"A ragdoll walks towards a labrador.\"" + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.481, + 0.371, + 0.628 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.481, + 0.59, + 0.628 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.479, + 0.822, + 0.628 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.174, + 0.63, + 0.812, + 0.656 + ], + "angle": 0, + "content": "Input Text: \"Three differently colored apples (yellow, green, red from left to right) with a Coca-Cola bottle placed behind the middle apple.\"" + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.658, + 0.37, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.395, + 0.658, + 0.59, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.658, + 0.822, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.174, + 0.809, + 0.819, + 0.824 + ], + "angle": 0, + "content": "Input Text: \"The oval sphere was nestled between the rectangular prism and the pentagonal pyramid.\"" + }, + { + "type": "text", + "bbox": [ + 0.236, + 0.83, + 0.288, + 0.843 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.437, + 0.832, + 0.551, + 0.845 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.666, + 0.83, + 0.776, + 0.845 + ], + "angle": 0, + "content": "Midjourney v6.1" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.868, + 0.871, + 0.926 + ], + "angle": 0, + "content": "Figure 1: Task: Compositional text-to-image generation. Evaluate the image-text alignment on attribute binding, numeracy, and object relationship. 
Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o outperforms Gemini 2.0 Flash and Midjourney v6.1 across all aspects. However, GPT-4o struggles with uncommon objects with a special geometry." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.195, + 0.103, + 0.393, + 0.119 + ], + "angle": 0, + "content": "Text-to-Image Generation" + }, + { + "type": "image", + "bbox": [ + 0.254, + 0.128, + 0.271, + 0.145 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.274, + 0.131, + 0.774, + 0.146 + ], + "angle": 0, + "content": "Evaluation: Visual content precisely following the text instruction." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.147, + 0.373, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.395, + 0.147, + 0.612, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.147, + 0.825, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.176, + 0.296, + 0.832, + 0.319 + ], + "angle": 0, + "content": "Input Text: \"The round, juicy watermelon sat in the cool, refreshing bowl of ice, waiting to be sliced open and devoured.\"" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.32, + 0.373, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.398, + 0.32, + 0.611, + 0.469 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.32, + 0.828, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.179, + 0.472, + 0.829, + 0.497 + ], + "angle": 0, + "content": "Input Text: \"The bold, expressive strokes of the artist's brush brought the blank canvas to life, forming a vibrant and dynamic masterpiece.\"" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.497, + 0.373, + 0.645 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.398, + 0.497, + 0.611, + 0.645 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.497, + 0.829, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.182, + 0.648, + 0.701, + 0.661 + ], + "angle": 0, + "content": "Input Text: \"The heavy raindrops fell on the smooth glass and the textured roof.\"" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.663, + 0.373, + 0.813 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.398, + 0.663, + 0.612, + 0.814 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.663, + 0.829, + 0.814 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.178, + 0.815, + 0.808, + 0.841 + ], + "angle": 0, + "content": "Input Text: \"The gentle, soothing melody of the piano filled the concert hall, as the pianist's fingers danced over the keys.\"" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.843, + 0.286, + 0.855 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.436, + 0.843, + 0.548, + 0.855 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.664, + 0.842, + 0.772, + 0.856 + ], + "angle": 0, + "content": "Midjourney v6.1" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 
0.873, + 0.87, + 0.943 + ], + "angle": 0, + "content": "Figure 2: Task: Compositional text-to-image generation. Evaluate the image-text alignment on attribute binding and complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o outperforms the other two models in generating objects aligned with the text prompts accurately. But for more abstract and creative tasks, Midjourney v6.1 performs the best." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.176, + 0.108, + 0.394, + 0.138 + ], + "angle": 0, + "content": "Text-to-Image Generation (with complex text prompt)" + }, + { + "type": "title", + "bbox": [ + 0.225, + 0.149, + 0.788, + 0.166 + ], + "angle": 0, + "content": "Evaluation: Visual content precisely following the text instruction." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.168, + 0.361, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.392, + 0.168, + 0.579, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.611, + 0.168, + 0.798, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.318, + 0.825, + 0.384 + ], + "angle": 0, + "content": "Input Text: \"An icy landscape. A vast expanse of snow-covered mountain peaks stretches endlessly. Beneath them is a dense forest and a colossal frozen lake. Three people are boating in three boats separately in the lake. Not far from the lake, a volcano threatens eruption, its rumblings felt even from afar. Above, a ferocious red dragon dominates the sky and commands the heavens, fueled by the volcano's relentless energy flow.\" (Prompt from GenArtist)" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.391, + 0.361, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.392, + 0.39, + 0.579, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.39, + 0.797, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.541, + 0.825, + 0.596 + ], + "angle": 0, + "content": "Input Text: \"On the rooftop of a skyscraper in a bustling cyberpunk city, a figure in a trench coat and neon-lit visor stands amidst a garden of bio-luminescent plants, overlooking the maze of flying cars and towering holograms. Robotic birds flit among the foliage, digital billboards flash advertisements in the distance.\" (Prompt from IterComp)" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.6, + 0.361, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.392, + 0.599, + 0.58, + 0.744 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.611, + 0.599, + 0.797, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.747, + 0.825, + 0.801 + ], + "angle": 0, + "content": "Input Text: \"In a magical seascape, a majestic ship sails through crystal blue waters surrounded by vibrant marine life and soaring birds. Towering cliffs frame the scene, while a stunning rainbow arches across the sky, blending with ethereal clouds. 
This enchanting journey captures the serene beauty of nature's wonders.\" (Prompt from IterComp)" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.807, + 0.292, + 0.821 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.419, + 0.807, + 0.545, + 0.821 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.678, + 0.807, + 0.724, + 0.821 + ], + "angle": 0, + "content": "FLUX" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.84, + 0.872, + 0.896 + ], + "angle": 0, + "content": "Figure 3: Task: Compositional text-to-image generation. Evaluate the image-text alignment on complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Pro [51]. Observation: GPT-4o and FLUX can generate more harmonious and natural scenes than Gemini 2.0 Flash." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.169, + 0.12, + 0.388, + 0.151 + ], + "angle": 0, + "content": "Text-to-Image Generation (with complex text prompt)" + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.159, + 0.218, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.223, + 0.163, + 0.789, + 0.179 + ], + "angle": 0, + "content": "Evaluation: Visual content precisely following the text instruction." + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.183, + 0.359, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.395, + 0.184, + 0.58, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.612, + 0.184, + 0.796, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.156, + 0.332, + 0.813, + 0.386 + ], + "angle": 0, + "content": "Input Text: \"Under the luminous full moon, a serene Japanese garden with traditional pagodas and a tranquil pond creates a magical night scene. The soft glow from the lantern-lit buildings reflects on the water, blending nature and architecture in harmony. The moonlight bathes the landscape, enhancing the peaceful ambiance.\" (Prompt from IterComp)" + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.395, + 0.334, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.415, + 0.395, + 0.559, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.628, + 0.395, + 0.779, + 0.549 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.163, + 0.556, + 0.82, + 0.61 + ], + "angle": 0, + "content": "Input Text: \"A Chinese general wearing a crown, with whiskers and golden Chinese style armor, standing with a majestic dragon head on his chest, symbolizing his strength, wearing black and gold boots.
His appearance exudes a sense of authority, wisdom, and an unyielding spirit, embodying the ideal ancient Chinese hero.\" (Prompt from RPG)" + }, + { + "type": "image", + "bbox": [ + 0.152, + 0.618, + 0.359, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.619, + 0.506, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.618, + 0.653, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.656, + 0.618, + 0.844, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.163, + 0.734, + 0.82, + 0.788 + ], + "angle": 0, + "content": "Input Text: \"A beautiful landscape with a river in the middle, the left of the river is in the evening and in the winter with a big iceberg and a small village while some people are skiing on the river and some people are skating, the right of the river is in the summer with a volcano in the morning and a small village while some people are playing.\" (Prompt from RPG)" + }, + { + "type": "text", + "bbox": [ + 0.222, + 0.796, + 0.279, + 0.81 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.432, + 0.796, + 0.558, + 0.81 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.723, + 0.793, + 0.769, + 0.807 + ], + "angle": 0, + "content": "FLUX" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.826, + 0.871, + 0.882 + ], + "angle": 0, + "content": "Figure 4: Task: Compositional text-to-image generation. Evaluate the image-text alignment on complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Pro [51]. Observation: GPT-4o struggles to generate culturally related elements and maintain boundary continuity (see rows 2 and 3), similar to Gemini 2.0 Flash and FLUX." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.293, + 0.108 + ], + "angle": 0, + "content": "2.1.2 Text Rendering" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.115, + 0.871, + 0.159 + ], + "angle": 0, + "content": "Text rendering is a task that aims at generating texts (characters, sentences, or even paragraphs) on an image. The text content is usually guided by the input prompt. Previous models [27, 2] show good capability in generating short text (within 10 words, such as signs or short phrases), but their ability to generate long texts remains limited." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.164, + 0.87, + 0.22 + ], + "angle": 0, + "content": "As shown in Figure 5, GPT-4o demonstrates comparable abilities to existing state-of-the-art (SOTA) baselines when generating short texts. All the methods except FLUX [51] perform well at rendering short text following the prompt. In this section, we primarily focus on long text rendering to examine whether GPT-4o can surpass these baselines for extended textual content." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.226, + 0.869, + 0.255 + ], + "angle": 0, + "content": "We choose POSTA [12], Gemini 2.0 Flash [99], Ideogram 3.0 [2], and Playground-v3 [64] as the baselines because of their established capabilities in rendering longer texts. The results are shown in Figure 6 and Figure 7." 
+ }, + { + "type": "text", + "bbox": [ + 0.127, + 0.261, + 0.547, + 0.275 + ], + "angle": 0, + "content": "From these examples, we make the following key observations:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.286, + 0.865, + 0.34 + ], + "angle": 0, + "content": "- GPT-4o's strength in long text generation: Compared with other baselines, GPT-4o demonstrates a superior ability to generate long, coherent text. In examples 1 and 3, GPT-4o produces detailed textual information with fewer than three characters generated incorrectly across more than 100 characters of text." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.346, + 0.868, + 0.386 + ], + "angle": 0, + "content": "- Baseline limitations: When the input prompt becomes extremely long, models such as Gemini 2.0 Flash, Ideogram 3.0, and Playground-v3 often produce significantly more errors or generate vague text patches that are difficult to recognize." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.392, + 0.867, + 0.433 + ], + "angle": 0, + "content": "- POSTA's performance: As a model specifically designed for poster-style text generation, POSTA performs comparably to, or in some instances slightly more precisely than, GPT-4o. We hypothesize this is due to its multi-step pipeline tailored for long text rendering." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.286, + 0.868, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.446, + 0.87, + 0.475 + ], + "angle": 0, + "content": "Overall, we conclude that GPT-4o excels at long text rendering, clearly outperforming most existing commercial models and delivering results on par with the latest specialized research models." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.164, + 0.115, + 0.336, + 0.131 + ], + "angle": 0, + "content": "Short Text Rendering" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.142, + 0.361, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.363, + 0.146, + 0.658, + 0.161 + ], + "angle": 0, + "content": "Evaluation: Text Rendering Precision." + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.164, + 0.297, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.316, + 0.165, + 0.453, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.471, + 0.165, + 0.611, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.165, + 0.839, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.28, + 0.819, + 0.325 + ], + "angle": 0, + "content": "Input Text: \"A beautiful painting of flowing colors and styles forming the words 'The GPT-4o/Ideogram/FLUX/SD3 research paper is nowhere!'.
the background is speckled with drops and splashes of paint.\"" + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.332, + 0.252, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.332, + 0.415, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.439, + 0.332, + 0.576, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.601, + 0.332, + 0.841, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.167, + 0.448, + 0.782, + 0.478 + ], + "angle": 0, + "content": "Input Text: \"Beautiful pixel art of a Wizard with hovering text 'Achievement unlocked: Diffusion models can spell now'.\"" + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.487, + 0.253, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.487, + 0.503, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.487, + 0.671, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.703, + 0.487, + 0.84, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.167, + 0.604, + 0.785, + 0.619 + ], + "angle": 0, + "content": "Input Text: \"A monkey holding a sign reading 'Scaling transformer models is awesome'.\"" + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.632, + 0.296, + 0.739 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.632, + 0.444, + 0.739 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.453, + 0.632, + 0.591, + 0.739 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.602, + 0.633, + 0.838, + 0.739 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.166, + 0.745, + 0.805, + 0.774 + ], + "angle": 0, + "content": "Input Text: \"A surreal and humorous scene in a classroom with the words 'GPUs go brrrrr' written in white chalk on a blackboard. In front of the blackboard.\"" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.785, + 0.253, + 0.798 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.327, + 0.785, + 0.423, + 0.8 + ], + "angle": 0, + "content": "Ideogram 3.0" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.785, + 0.543, + 0.798 + ], + "angle": 0, + "content": "FLUX" + }, + { + "type": "text", + "bbox": [ + 0.702, + 0.785, + 0.738, + 0.798 + ], + "angle": 0, + "content": "SD3" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.823, + 0.87, + 0.893 + ], + "angle": 0, + "content": "Figure 5: Task: Short text rendering. Generate prompt-aligned, concise textual content (typically within 10 words) on an image. Setup: Each sample is produced based on a guiding text prompt. Comparisons are made with prior SOTA models [27, 2] and FLUX [51]. Observations: GPT-4o achieves performance on par with existing SOTA baselines in rendering short texts, consistently following the prompt with minimal errors. All evaluated methods—except FLUX [51]—deliver high-fidelity results in this setting." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.208, + 0.101, + 0.361, + 0.117 + ], + "angle": 0, + "content": "Long Text Rendering" + }, + { + "type": "title", + "bbox": [ + 0.375, + 0.128, + 0.644, + 0.142 + ], + "angle": 0, + "content": "Evaluation: Text Rendering Precision." + }, + { + "type": "image", + "bbox": [ + 0.183, + 0.147, + 0.321, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.228, + 0.312, + 0.277, + 0.324 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "image", + "bbox": [ + 0.322, + 0.147, + 0.464, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.37, + 0.312, + 0.42, + 0.324 + ], + "angle": 0, + "content": "POSTA" + }, + { + "type": "image", + "bbox": [ + 0.465, + 0.147, + 0.605, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.48, + 0.312, + 0.588, + 0.324 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.147, + 0.813, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.663, + 0.312, + 0.753, + 0.326 + ], + "angle": 0, + "content": "Ideogram 3.0" + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.328, + 0.271, + 0.34 + ], + "angle": 0, + "content": "Input Text:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.341, + 0.81, + 0.367 + ], + "angle": 0, + "content": "\"Generate a movie poster with a sci-fi space theme, a solitary figure standing on an alien planet, facing a massive outpost." + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.367, + 0.442, + 0.38 + ], + "angle": 0, + "content": "The poster displays the following text:" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.38, + 0.346, + 0.392 + ], + "angle": 0, + "content": "Title: The Last Outpost" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.392, + 0.487, + 0.405 + ], + "angle": 0, + "content": "Subtitle: When the stars fall, the truth rises" + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.406, + 0.277, + 0.417 + ], + "angle": 0, + "content": "Information:" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.418, + 0.368, + 0.431 + ], + "angle": 0, + "content": "Produced by Jackson Ward" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.432, + 0.338, + 0.444 + ], + "angle": 0, + "content": "Music by Aria Calloway" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.444, + 0.371, + 0.457 + ], + "angle": 0, + "content": "Screenplay by Elena Sharpe" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.457, + 0.371, + 0.47 + ], + "angle": 0, + "content": "Directed By Sylvia Hartman" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.47, + 0.797, + 0.52 + ], + "angle": 0, + "content": "\"A visually stunning and narratively gripping exploration of the unknown. The Last Outpost masterfully blends elements of science fiction, mystery, and psychological thriller, creating a hauntingly atmospheric journey that will leave audiences on the edge of their seats.\" -- Global Film Review\"." 
+ }, + { + "type": "image", + "bbox": [ + 0.182, + 0.525, + 0.318, + 0.683 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.228, + 0.691, + 0.276, + 0.703 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "image", + "bbox": [ + 0.32, + 0.525, + 0.462, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.37, + 0.691, + 0.42, + 0.703 + ], + "angle": 0, + "content": "POSTA" + }, + { + "type": "image", + "bbox": [ + 0.462, + 0.524, + 0.617, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.48, + 0.691, + 0.588, + 0.703 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image", + "bbox": [ + 0.618, + 0.525, + 0.824, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.663, + 0.691, + 0.753, + 0.704 + ], + "angle": 0, + "content": "Ideogram 3.0" + }, + { + "type": "title", + "bbox": [ + 0.185, + 0.707, + 0.266, + 0.719 + ], + "angle": 0, + "content": "Input Text:" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.72, + 0.802, + 0.771 + ], + "angle": 0, + "content": "\"Create a poster with the theme of a Journey of Solitude. The background should depict a lone figure walking toward an unusable form of transportation. The scene should evoke a sense of being lost, helplessness, and desolation, capturing the emotional weight of losing oneself in a barren, unforgiving landscape." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.771, + 0.343, + 0.785 + ], + "angle": 0, + "content": "Title: Solitary Journeys" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.785, + 0.318, + 0.796 + ], + "angle": 0, + "content": "Subtitle: Elara Voss" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.797, + 0.565, + 0.809 + ], + "angle": 0, + "content": "Information: WANDERING THROUGH THE UNKNOWN\"." + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.83, + 0.872, + 0.928 + ], + "angle": 0, + "content": "Figure 6: Task: Long text rendering. Generate extended, coherent, and prompt-consistent textual content on an image. Setup: Evaluations are conducted against advanced baselines including POSTA [12], Gemini 2.0 Flash [99], Ideogram 3.0 [2], and Playground-v3 [64]. Observations: GPT-4o excels in long text rendering by producing coherent, detailed textual information with very few character errors. In contrast, models like Gemini 2.0 Flash, Ideogram 3.0, and Playground-v3 often exhibit increased errors or generate vague text when faced with lengthy prompts, while POSTA's tailored multi-step pipeline sometimes yields competitive precision. Overall, GPT-4o outperforms most commercial models and rivals specialized research approaches in extended text generation." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.201, + 0.292, + 0.357, + 0.309 + ], + "angle": 0, + "content": "Long Text Rendering" + }, + { + "type": "image", + "bbox": [ + 0.347, + 0.317, + 0.368, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.368, + 0.321, + 0.648, + 0.336 + ], + "angle": 0, + "content": "Evaluation: Text Rendering Precision." 
+ }, + { + "type": "image", + "bbox": [ + 0.167, + 0.34, + 0.308, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.212, + 0.511, + 0.263, + 0.523 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.34, + 0.455, + 0.504 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.356, + 0.511, + 0.408, + 0.523 + ], + "angle": 0, + "content": "POSTA" + }, + { + "type": "image", + "bbox": [ + 0.455, + 0.34, + 0.616, + 0.504 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.48, + 0.511, + 0.591, + 0.524 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.34, + 0.829, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.684, + 0.511, + 0.78, + 0.525 + ], + "angle": 0, + "content": "Playground-v3" + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.538, + 0.261, + 0.551 + ], + "angle": 0, + "content": "Input Text:" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.552, + 0.793, + 0.592 + ], + "angle": 0, + "content": "\"Please generate an artistic and stylized promotional poster. The style is an artistic painting style. The theme is about nature and city. The poster displays the following information: Title: Fragmented Harmony" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.592, + 0.536, + 0.605 + ], + "angle": 0, + "content": "Subtitle: Between the steel and sky, life finds its way." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.605, + 0.815, + 0.672 + ], + "angle": 0, + "content": "Information: Amid the towering structures and the quiet persistence of nature, a delicate balance emerges. The complex and often contradictory relationship between urban development and the natural world reveals itself in fleeting moments of harmony. Though fragmented, life continues, threading its way through the shadows of progress. Here, conflict and coexistence form an intricate dance--sometimes at odds, sometimes in unexpected unity\"." + }, + { + "type": "image_caption", + "bbox": [ + 0.196, + 0.7, + 0.799, + 0.715 + ], + "angle": 0, + "content": "Figure 7: Task: Long text rendering. The Setup and Observations are the same as Figure 6." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.339, + 0.106 + ], + "angle": 0, + "content": "2.1.3 Document Generation" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.115, + 0.872, + 0.158 + ], + "angle": 0, + "content": "We also explore a novel task: document image generation with GPT-4o, comparing its performance with Gemini 2.0 Flash [99] and Playground-v3 [64]. As shown in Figures 8-10, GPT-4o produces document images with cleaner layouts and more consistent content." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.177, + 0.393, + 0.193 + ], + "angle": 0, + "content": "Document Image Generation" + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.204, + 0.357, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.358, + 0.208, + 0.651, + 0.223 + ], + "angle": 0, + "content": "Evaluation: Text Rendering Precision."
+ }, + { + "type": "title", + "bbox": [ + 0.189, + 0.256, + 0.333, + 0.267 + ], + "angle": 0, + "content": "Attention Is All You Need" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.277, + 0.341, + 0.298 + ], + "angle": 0, + "content": "Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Lilion Jones Aidan N.Gomez Lukasz Kaiser IIIa Polosukhin" + }, + { + "type": "title", + "bbox": [ + 0.245, + 0.311, + 0.279, + 0.319 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.321, + 0.369, + 0.415 + ], + "angle": 0, + "content": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on tw machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature." + }, + { + "type": "title", + "bbox": [ + 0.405, + 0.324, + 0.503, + 0.331 + ], + "angle": 0, + "content": "Attention Is All You Need" + }, + { + "type": "text", + "bbox": [ + 0.398, + 0.331, + 0.568, + 0.416 + ], + "angle": 0, + "content": "Ashish Vourakis, NM Nazarov, NJI Parmar, Jbab Udekoreli, Lillion Jones, Adiinil S. W. Coomce, Lakota Kiber, Pala Poslashov, and M. A. D. G. R. Smith. 2017. The neural network models are based on recurrent or conventional neural networks in an encoder-decoder loss. The best produced models also connect the encoder and decoder loss through an attention mechanism. As, we propose a new study mechanism for a machine, the Transformer, based attention mechanisms, dispensing with the same weight as the encoder-decoder loss. The results of the training tasks show these models to be superior in quality while being more parsibilized and requiring significantly less time to time to move train. Our model achieves 84.2 BLUE in the WMT-50 Go-to-translation task, which is comparable to the performance of our previous work [3]. In addition, our WMT-30 Pragmatic task, our model established a new single-model state-of-the-art-state-of-the-art BLUE score of 41.8 when \\(\\alpha = 5\\) training for 3.5 days on GPUs after fraction of the training costs of the best models from literature. We propose Transformer generalizes well by applying it successfully to English syntacticity parsing both with large and limited training data." + }, + { + "type": "title", + "bbox": [ + 0.603, + 0.229, + 0.706, + 0.237 + ], + "angle": 0, + "content": "Attention Is All You Need" + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.242, + 0.693, + 0.27 + ], + "angle": 0, + "content": "Ashish A. [Al] You. 
Wea a nono' Ainon 1 \nAshish Yawani, Naqadzaree, laokotri \nAlok Uzzotner, Anokalika Sanik, Jokoslav adar, Gosak III Ploosukhaini \nAlok Uzzotner, Anokalika Sanik" + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.281, + 0.83, + 0.418 + ], + "angle": 0, + "content": "Antext, exotocic sequecra transoedscnncs on cbr be caes baccared on bracococcyne nort bell netiabon an ecocycclion, an ecocyclon. TeTrane: the ensonnnmnsn neeepnckian. Epipcnie rile kceely on meenctiny As adeterdiencr. \nnpopioors sonr tonarwamchim. I mtnr. vorti. inenpoea a dedusum minnyss onomcrh. cortordone.lora ontata tose or uin hoperiosper. \nThe rorner is s oovl dtt maive de acemnccnodkdu aleu cormunb-dlr bing dvl-ndr 016de and mechance \n11 Dae nucnccnng \nceso nucnse \n12 - eannnr er on attonne amehnes asnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnnccnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn" + }, + { + "type": "text", + "bbox": [ + 0.234, + 0.427, + 0.292, + 0.44 + ], + "angle": 0, + "content": "GPT40" + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.428, + 0.546, + 0.442 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.664, + 0.426, + 0.773, + 0.443 + ], + "angle": 0, + "content": "Playground-v3" + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.449, + 0.26, + 0.464 + ], + "angle": 0, + "content": "Input Text:" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.466, + 0.797, + 0.5 + ], + "angle": 0, + "content": "\"Generate A realistic screenshot of the first page of the Paper from the following information:" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.504, + 0.412, + 0.519 + ], + "angle": 0, + "content": "Title: Attention Is All You Need" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.523, + 0.834, + 0.557 + ], + "angle": 0, + "content": "Author List: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.561, + 0.832, + 0.825 + ], + "angle": 0, + "content": "Abstract: The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. 
We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.\"" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.843, + 0.871, + 0.884 + ], + "angle": 0, + "content": "Figure 8: Task: Document image generation. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Playground-v3 [64]. Observation: GPT-4o can generate more consistent and accurate font and format than the other two models." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.171, + 0.151, + 0.392, + 0.168 + ], + "angle": 0, + "content": "Document Image Generation" + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.177, + 0.355, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.357, + 0.181, + 0.651, + 0.198 + ], + "angle": 0, + "content": "Evaluation: Text Rendering Precision." + }, + { + "type": "title", + "bbox": [ + 0.162, + 0.225, + 0.38, + 0.251 + ], + "angle": 0, + "content": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.261, + 0.335, + 0.278 + ], + "angle": 0, + "content": "Jacob Devlin Ming-Wei Chang Kenton Lee Kristina Toutanova" + }, + { + "type": "title", + "bbox": [ + 0.252, + 0.291, + 0.288, + 0.3 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.302, + 0.384, + 0.367 + ], + "angle": 0, + "content": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result; the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.367, + 0.388, + 0.404 + ], + "angle": 0, + "content": "BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to \\(80.5\\%\\) (7.7% point absolute improvement), MultiNLI accuracy to \\(\\mathcal{S}6.7\\%\\) (4.6% absolute improvement), SQAud v1.1 question answering Test F1 to 93.2 (1.5 point absolute" + }, + { + "type": "text", + "bbox": [ + 0.242, + 0.41, + 0.3, + 0.424 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "title", + "bbox": [ + 0.418, + 0.226, + 0.584, + 0.248 + ], + "angle": 0, + "content": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" + }, + { + "type": "text", + "bbox": [ + 0.421, + 0.251, + 0.58, + 0.266 + ], + "angle": 0, + "content": "Ashlor Jacob Doslin Ming Wei Chang, Kenton Lee Abstrut Win, Karinlin Touranosa" + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.266, + 0.589, + 0.321 + ], + "angle": 0, + "content": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Cryostatnet as Transformer. 
Unlike recnec mean, BERT is designed by pre-train deep bidirectional representations from nonshaded text by jasily selfconforming on both text 1st&4, xavier coint eorect, tate 1st&2 to win 1st, sacp0-twincent BERT model can be fine mused with just one additional output layer to create of dafve for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications." + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.328, + 0.59, + 0.376 + ], + "angle": 0, + "content": "BERT is conceptually simple and empirically powerful. It obtains now state-of-the-art results on eleven evertweaven multimoment language processing tasks, including GLUE score to \\(80.5\\%\\) (7.69 absolute improvement), pushing the GLUE alloselect improvement). MulaNLI accuracy to \\(\\varepsilon_{\\mathrm{M}} = 0.1\\), \\(\\nu_{\\mathrm{L}} < \\varepsilon_{\\mathrm{M}} / \\varepsilon_{\\mathrm{M}}\\) (1.5 point absolute improvement) and SQoAD v2.0 Test F1 to 83.1 (3.1 point absolute improvement)." + }, + { + "type": "text", + "bbox": [ + 0.439, + 0.411, + 0.567, + 0.425 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "title", + "bbox": [ + 0.621, + 0.23, + 0.824, + 0.248 + ], + "angle": 0, + "content": "BERT: Pre-training on Lepi Bidellar Tansson Translons for Language Understond mderance litting' cetting from t cowf henvaming" + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.251, + 0.653, + 0.256 + ], + "angle": 0, + "content": "Author, List:" + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.257, + 0.821, + 0.269 + ], + "angle": 0, + "content": "J Asbad Devlin, Yiw Changuaguagaa Kionn age rspgectangane cans \n\\(^{a}\\)pressin liKistcn-Toutanfa" + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.269, + 0.689, + 0.275 + ], + "angle": 0, + "content": "represeons-Uintanlvania" + }, + { + "type": "list", + "bbox": [ + 0.621, + 0.251, + 0.821, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.276, + 0.648, + 0.283 + ], + "angle": 0, + "content": "A" + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.287, + 0.836, + 0.329 + ], + "angle": 0, + "content": "We introduce a new languagegretrovetercendentiale monoclin klonstionist monole conBldfecarstaadss reprenters from nemer raje Sfiflnonanecones. desessnissrall ranauagaleafdelfe xyn unming on hnlaesaeare two ploddes also the por-entant canteletory a state-vwraon one-on-one coffice of anisotropy, and the ploso-syntropic colective of states. Of s2s212310 pain or questionbmporansuansluangene Tcnf?f to ingest ingf Sf10 tto 46 w. (I, test),marshersonalizne imnance immmens" + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.339, + 0.801, + 0.346 + ], + "angle": 0, + "content": "BERT is conceptually imlilienenarplenpholcft-nu surate-fine-ams" + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.347, + 0.824, + 0.365 + ], + "angle": 0, + "content": "ronen 1 oonranaalwauanu viipopluoteforocnirandinns inget caught anourage, vovlurvulgina for nain. 2004. The use of the word \"sulfur\" in the text is a question envoing SUIF u697." + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.365, + 0.837, + 0.379 + ], + "angle": 0, + "content": "- GBFEscors/aanoreqquasurf and Squad w.10 aninlvalte 83.7% 4.6% (X) \n- TeST fto onop:11.3% (x)" + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.383, + 0.836, + 0.403 + ], + "angle": 0, + "content": "→ BERT is cponconuynlyrsnaintally pocefine-at-ut: ouvah176. 
(JET v.c.37% quinting anguen linyuH-aCLS sccorts onoonssthe sonea 4000A/AresoVc LEAU pioiHcB: gnrmaeh an epesrourinans A7c)0v o.o35 aed 1170" + }, + { + "type": "list", + "bbox": [ + 0.621, + 0.339, + 0.837, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.677, + 0.41, + 0.786, + 0.427 + ], + "angle": 0, + "content": "Playground-v3" + }, + { + "type": "title", + "bbox": [ + 0.162, + 0.441, + 0.259, + 0.456 + ], + "angle": 0, + "content": "Input Text:" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.458, + 0.79, + 0.492 + ], + "angle": 0, + "content": "Generate A realistic screenshot of the first page of the Paper from the following information:" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.496, + 0.735, + 0.533 + ], + "angle": 0, + "content": "Title: BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.535, + 0.836, + 0.702 + ], + "angle": 0, + "content": "Author List: Jacob Devlin, Ming-Wei Chang, Kenton Lee, Kristina Toutanova \nAbstract: We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.705, + 0.836, + 0.798 + ], + "angle": 0, + "content": "BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to \\(80.5\\%\\) (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement)." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.838, + 0.818, + 0.854 + ], + "angle": 0, + "content": "Figure 9: Task: Document image generation. The Setup and Observations are the same as Fig. 8." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.171, + 0.128, + 0.391, + 0.144 + ], + "angle": 0, + "content": "Document Image Generation" + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.154, + 0.355, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.358, + 0.158, + 0.651, + 0.175 + ], + "angle": 0, + "content": "Evaluation: Text Rendering Precision." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.213, + 0.346, + 0.25 + ], + "angle": 0, + "content": "You Only Look Once: Unified, Real-Time Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.259, + 0.361, + 0.275 + ], + "angle": 0, + "content": "Joseph Redmon, Santosh Divvala, Ross Girshick Ali Farhadi" + }, + { + "type": "title", + "bbox": [ + 0.251, + 0.286, + 0.285, + 0.293 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.294, + 0.376, + 0.347 + ], + "angle": 0, + "content": "We present YOLO, a new approach to object detection. 
Prior work on object detection repurposes classifiers to perform detection. Instead, we frame object detection as a regression problem to spatially separated bounding boxes and associated class probabilities. A single neural network predicts bounding boxes and class probabilities directly from full images in one evaluation. Since the whole detection pipeline is a single network, it can be optimized end-to-end directly on detection performance." + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.347, + 0.377, + 0.388 + ], + "angle": 0, + "content": "Our unified architecture is extremely fast. Our base YOLO model processes images in real-time at 45 frames per second. A smaller version of the network. Fast YOLO, processes an astounding 155 frames per second while still achieving double the mAP of other real-time detectors. Compared to state-of-the-art detection systems YOLO makes more localization errors but is far less likely to pred" + }, + { + "type": "text", + "bbox": [ + 0.416, + 0.216, + 0.581, + 0.234 + ], + "angle": 0, + "content": "You Only Look Once: Unlimited, Time/Indirect Decciption \nAuthor:Lesser Jowesh Redmonial Siptedthi Adri Farhad" + }, + { + "type": "text", + "bbox": [ + 0.415, + 0.236, + 0.624, + 0.26 + ], + "angle": 0, + "content": "We present YJLO, a new approach to object detection. Driver search uses oleejeon cieeuses to represen or correct deferential finwication. Istegedel, when farstial, we confirmed anapase, agate trius to a signafal or sialipal staphylococcal preprobed boing boos summarilyour heartbodn frontal nater and the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the" + }, + { + "type": "text", + "bbox": [ + 0.415, + 0.261, + 0.498, + 0.266 + ], + "angle": 0, + "content": "Acatat Rechun-Yorim Bogae lo rojctc filocly" + }, + { + "type": "text", + "bbox": [ + 0.415, + 0.269, + 0.623, + 0.296 + ], + "angle": 0, + "content": "Our undersonged tandoce boe + ant - or cemoe - aepocemis in a times, bodingly narmabogus haarban. Jnci enwecstic ancoontie fluy fast YOLO pue moebes, 45f rans perennetioles unperenentio- dorensis in reel thane reobexes petarges. ectcylcfom oene trsoute of sucrose princeia of docuta. d. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .." + }, + { + "type": "table", + "bbox": [ + 0.418, + 0.299, + 0.629, + 0.345 + ], + "angle": 0, + "content": "
Abstr. LOC nomencl.
FascicristinFas1/3
MADS-eos-miR-fc::rec_mucmuc
fli-2 (C9-03)fli-25p438%
#868E1 (b-cd)#868ap33<%
" + }, + { + "type": "text", + "bbox": [ + 0.415, + 0.347, + 0.624, + 0.361 + ], + "angle": 0, + "content": "aAldofoi, aS; atalfoi: all other anatolian oboforators of otects. 1 oxtinct nands, aHs: all extinctions, for foxtroomes sereis (outcited) I am Alstomos, Rictus sp. to theft, to rott and aftall to sell otles, or licee, and so on." + }, + { + "type": "text", + "bbox": [ + 0.415, + 0.366, + 0.624, + 0.39 + ], + "angle": 0, + "content": "```bash\n#renatIdack Corrgend to state-vtextction system to objects. CN in All and Chon\nPectmon Is dendrites into Commute on ooclastin or Donr Tnp, eutment the\nimagery mod fringes to arf the Articn on A mitronin or aortothetotnoid\nned\n#recognition VOLCOVOLD, Cogenture: GcGmCunlty: VOLCOVOLD" + }, + { + "type": "text", + "bbox": [ + 0.242, + 0.397, + 0.3, + 0.411 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.452, + 0.398, + 0.58, + 0.412 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "title", + "bbox": [ + 0.649, + 0.2, + 0.803, + 0.22 + ], + "angle": 0, + "content": "You Only Look Once: Unified, Real-Time Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.649, + 0.221, + 0.805, + 0.234 + ], + "angle": 0, + "content": "Joseph Redmon, Santosh Dlwala, Ross Girishk, Ali Farhedi Abstract" + }, + { + "type": "text", + "bbox": [ + 0.649, + 0.235, + 0.835, + 0.285 + ], + "angle": 0, + "content": "We presentYOLO, a new approach to object detection. Prior work on object detection epires classifiers to perform detection. Instead, we frame object detection as a regression problem to spatially separated bounding beves and associated class probabilities. A single neural network, predicts brednis bounding boxes and class reliabilities directly from full evaluation. Since the whole detection pipeline is a singkwork, it can be optimised end-to-end directly on detection performance." + }, + { + "type": "text", + "bbox": [ + 0.649, + 0.291, + 0.83, + 0.368 + ], + "angle": 0, + "content": "Our unified architecture is extremely fast. Our base VOLO model precursors images in real-time at 45 frames per second. A smaller version of trieva, Faat VOLO, processes an aetounding 155 frames per second. ¥8 frames per second while clll achieving double the MAP of real-time detectors. Compared is sisl-site detection systems VOLO makes mark deterrms. VOLO makes more lecdiscipli predict fasse ertris to ter en ptiplicit false detections where nothing exists. Finally, VOLO, VOLO lesrs vs every revalur representations of objects all other detection methods, including DPN and R-CNN, by a wide when generalizing from natural images to artwork artwork on both on the Picasso Dataset and the People-Art Dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.689, + 0.396, + 0.798, + 0.412 + ], + "angle": 0, + "content": "Playground-v3" + }, + { + "type": "title", + "bbox": [ + 0.162, + 0.428, + 0.259, + 0.443 + ], + "angle": 0, + "content": "Input Text:" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.445, + 0.795, + 0.478 + ], + "angle": 0, + "content": "\"Generate A realistic screenshot of the first page of the Paper from the following information:" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.483, + 0.655, + 0.5 + ], + "angle": 0, + "content": "Title: You Only Look Once: Unified, Real-Time Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.502, + 0.716, + 0.518 + ], + "angle": 0, + "content": "Author List: Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.521, + 0.833, + 0.65 + ], + "angle": 0, + "content": "Abstract: We present YOLO, a new approach to object detection. Prior work on object detection repurposes classifiers to perform detection. Instead, we frame object detection as a regression problem to spatially separated bounding boxes and associated class probabilities. A single neural network predicts bounding boxes and class probabilities directly from full images in one evaluation. Since the whole detection pipeline is a single network, it can be optimized end-to-end directly on detection performance." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.654, + 0.825, + 0.821 + ], + "angle": 0, + "content": "Our unified architecture is extremely fast. Our base YOLO model processes images in real-time at 45 frames per second. A smaller version of the network, Fast YOLO, processes an astounding 155 frames per second while still achieving double the mAP of other real-time detectors. Compared to state-of-the-art detection systems, YOLO makes more localization errors but is far less likely to predict false detections where nothing exists. Finally, YOLO learns very general representations of objects. It outperforms all other detection methods, including DPM and R-CNN, by a wide margin when generalizing from natural images to artwork on both the Picasso Dataset and the People-Art Dataset.\"" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.861, + 0.822, + 0.877 + ], + "angle": 0, + "content": "Figure 10: Task: Document image generation. The Setup and Observations are the same as Fig. 8." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.385, + 0.107 + ], + "angle": 0, + "content": "2.1.4 Panorama Image Generation" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.115, + 0.873, + 0.242 + ], + "angle": 0, + "content": "Panorama image generation aims at creating a 360-degree view of a static scene, enabling immersive and comprehensive visual experiences. In our experiments, we select Pano-SD [119] and Gemini 2.0 Flash [99] as the baselines, with representative results illustrated in Figure 11. The comparisons reveal that while the baseline models can generate coherent panorama-like images with seamlessly connectable left and right sides, GPT-4o struggles to produce a true panorama. In most cases, GPT-4o generates images that approximate a panoramic view but still fall short in ensuring the necessary continuity across the image boundaries. 
We attribute this limitation to the insufficient representation of panorama images in its training data, as well as a predisposition towards generating images with a higher vertical aspect ratio rather than a wider one. Consequently, in the realm of panorama image generation, GPT-4o is inferior to the existing baseline models." + }, + { + "type": "title", + "bbox": [ + 0.184, + 0.264, + 0.404, + 0.281 + ], + "angle": 0, + "content": "Panorama Image Generation" + }, + { + "type": "title", + "bbox": [ + 0.358, + 0.291, + 0.642, + 0.31 + ], + "angle": 0, + "content": "Evaluation: Is panorama image?" + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.316, + 0.337, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.317, + 0.592, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.601, + 0.317, + 0.847, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.417, + 0.84, + 0.448 + ], + "angle": 0, + "content": "Input Text: \"Please generate a panorama image: A living room with hardwork floors, a fireplace, and large windows.\"" + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.451, + 0.338, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.451, + 0.591, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.598, + 0.451, + 0.847, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.551, + 0.819, + 0.58 + ], + "angle": 0, + "content": "Input Text: \"Please generate a panorama image: A cozy study with built-in bookshelves and a leather.\"" + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.586, + 0.334, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.343, + 0.586, + 0.59, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.598, + 0.586, + 0.848, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.686, + 0.79, + 0.716 + ], + "angle": 0, + "content": "Input Text: \"Please generate a panorama image: A bedroom with a ceiling fan, gray walls, hardwood floors, a bed, and a TV on the wall.\"" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.724, + 0.27, + 0.738 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.405, + 0.724, + 0.524, + 0.738 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.689, + 0.724, + 0.752, + 0.738 + ], + "angle": 0, + "content": "Pano-SD" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.764, + 0.873, + 0.862 + ], + "angle": 0, + "content": "Figure 11: Task: Panorama image generation, aiming to create immersive 360-degree views of static scenes. Setup: We compare GPT-4o with established baselines such as Pano-SD [119] and Gemini 2.0 Flash [99] to evaluate the generation of coherent panoramic images. Observations: While the baseline models reliably produce panoramas with seamlessly connected left and right sides, GPT-4o tends to only approximate a panoramic view and struggles to maintain continuity across image boundaries. This shortfall is likely due to limited panorama image representation in its training data and a tendency to generate images with a higher vertical aspect ratio rather than a wider one, rendering it inferior to the baselines in this task." 
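Figure 11 frames its evaluation as "Is panorama image?", i.e., whether the generated view wraps, and the discussion above credits the baselines with "seamlessly connectable left and right sides". A minimal, hedged way to check that property is to compare the two border columns of the image. The helper below is our own sketch under assumed conventions (H x W x C array, lower score = better seam); it is not the paper's evaluation code.

```python
import numpy as np

def seam_discontinuity(panorama: np.ndarray) -> float:
    """Mean absolute difference between the left and right pixel columns
    of an H x W x C image: near 0 for a seamlessly wrappable panorama,
    large when the borders do not connect. Illustrative sketch only."""
    left = panorama[:, 0, :].astype(np.float64)
    right = panorama[:, -1, :].astype(np.float64)
    return float(np.abs(left - right).mean())

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    img = rng.integers(0, 256, size=(256, 1024, 3))  # stand-in "panorama"
    wrapped = img.copy()
    wrapped[:, -1, :] = wrapped[:, 0, :]             # force matching borders
    print(seam_discontinuity(img), seam_discontinuity(wrapped))
```

In practice one would average over a strip of border columns rather than a single column to tolerate noise, but the single-column version keeps the idea explicit.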
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.325, + 0.106 + ], + "angle": 0, + "content": "2.2 Image-to-Image Tasks" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.117, + 0.283, + 0.132 + ], + "angle": 0, + "content": "2.2.1 Style Transfer" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.141, + 0.872, + 0.295 + ], + "angle": 0, + "content": "Style transfer is a classic yet evolving task in computer vision, aiming to render an image in a specific artistic style while preserving the original content. It bridges the domains of vision and art, enabling applications such as digital artwork creation, film post-production, and virtual reality environment design. Early approach [33] used convolutional neural networks to separate and recombine content and style representations from images. This seminal work enabled the artistic stylization of photographs by optimizing pixel values to match a desired style. To improve efficiency, Johnson et al. [47] proposed feed-forward networks for real-time style transfer using perceptual losses. Later methods such as AdaIN [43] and WCT [57] enabled arbitrary style transfer without retraining for each new style. Transformer-based models like StyTr² [23] have been introduced to enhance style transfer quality and better preserve structural details. More recently, with the rapid development of image synthesis techniques, especially diffusion models, style transfer has seen further advancements in both quality and controllability. However, transferring specific artistic styles still typically requires a non-trivial amount of training data." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.299, + 0.871, + 0.37 + ], + "angle": 0, + "content": "To comprehensively evaluate the style transfer capability of GPT-4o, we conduct comparisons against several recent competitive models, including Gemini 2.0 Flash [99] and Midjourney v6.1 [75]. Specifically, Figure 12 illustrates style transfer results for natural scenes, while Figure 13 focuses on human facial images. Across a diverse range of styles, such as Monet, Van Gogh, Pixar, Cyberpunk, Snoopy, Disney, Ghibli, and Cubism, GPT-4o demonstrates consistently superior performance in both stylistic fidelity and content preservation." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.374, + 0.872, + 0.46 + ], + "angle": 0, + "content": "Notably, in the case of Ghibli style transfer, GPT-4o exhibits remarkable fidelity to the original artistic aesthetics, closely resembling the target style with vivid color palettes and soft contours. In contrast, both Gemini and Midjourney often produce inconsistent visual styles and textures. Furthermore, GPT-4o excels at preserving fine-grained content details, such as facial structure, earrings, clothing, and hairstyles, which are often misrepresented or lost in the outputs of other models. These results suggest that GPT-4o not only captures high-level style semantics but also maintains strong spatial consistency and semantic alignment." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.162, + 0.211, + 0.329, + 0.227 + ], + "angle": 0, + "content": "Prompted Stylization" + }, + { + "type": "title", + "bbox": [ + 0.364, + 0.224, + 0.624, + 0.243 + ], + "angle": 0, + "content": "Evaluation: Consistency/style." 
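The survey paragraph above notes that AdaIN [43] enabled arbitrary style transfer without retraining for each new style. Its core operation is compact enough to state exactly: normalize the content feature map per channel, then rescale it to the style feature map's channel-wise mean and standard deviation. The sketch below implements that operation; the (N, C, H, W) tensor layout and the epsilon value are our assumptions.

```python
import numpy as np

def adain(content: np.ndarray, style: np.ndarray, eps: float = 1e-5) -> np.ndarray:
    """Adaptive instance normalization over feature maps of shape
    (N, C, H, W): whiten the content features per channel, then rescale
    them to the style features' channel-wise mean/std."""
    c_mean = content.mean(axis=(2, 3), keepdims=True)
    c_std = content.std(axis=(2, 3), keepdims=True)
    s_mean = style.mean(axis=(2, 3), keepdims=True)
    s_std = style.std(axis=(2, 3), keepdims=True)
    return s_std * (content - c_mean) / (c_std + eps) + s_mean

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    content = rng.normal(0.0, 1.0, size=(1, 64, 32, 32))
    style = rng.normal(3.0, 2.0, size=(1, 64, 32, 32))
    out = adain(content, style)
    # The output now roughly carries the style statistics (mean ~3, std ~2).
    print(out.mean(), out.std())
```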
+ }, + { + "type": "image", + "bbox": [ + 0.155, + 0.248, + 0.323, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.248, + 0.5, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.247, + 0.683, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.684, + 0.247, + 0.851, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.306, + 0.335, + 0.7, + 0.349 + ], + "angle": 0, + "content": "Input Text: \"Generate the Monet style of this picture.\"" + }, + { + "type": "image", + "bbox": [ + 0.155, + 0.351, + 0.322, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.351, + 0.5, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.351, + 0.683, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.684, + 0.35, + 0.851, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.303, + 0.439, + 0.719, + 0.452 + ], + "angle": 0, + "content": "Input Text: \"Generate the Van Gogh style of this picture.\"" + }, + { + "type": "image", + "bbox": [ + 0.155, + 0.454, + 0.323, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.454, + 0.5, + 0.539 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.454, + 0.682, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.684, + 0.454, + 0.849, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.308, + 0.54, + 0.694, + 0.554 + ], + "angle": 0, + "content": "Input Text: \"Generate the Pixar style of this picture.\"" + }, + { + "type": "image", + "bbox": [ + 0.155, + 0.556, + 0.323, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.556, + 0.5, + 0.641 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.556, + 0.681, + 0.641 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.684, + 0.555, + 0.85, + 0.641 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.304, + 0.644, + 0.731, + 0.658 + ], + "angle": 0, + "content": "Input Text: \"Generate the Cyberpunk style of this picture.\"" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.665, + 0.284, + 0.68 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.384, + 0.665, + 0.437, + 0.678 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.665, + 0.646, + 0.678 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.704, + 0.664, + 0.816, + 0.679 + ], + "angle": 0, + "content": "Midjourney v6.1" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.699, + 0.872, + 0.797 + ], + "angle": 0, + "content": "Figure 12: Task: Style transfer, aiming to render an image in a specific artistic style while preserving the original content. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and Midjourney v6.1 [75] on natural scene style transfer across multiple artistic domains. Observations: GPT-4o exhibits significantly better content preservation compared to Midjourney v6.1, maintaining fine-grained content details and structural consistency. 
In terms of style, it faithfully adheres to the textual description, effectively rendering vivid color palettes and soft contours that characterize the target style. This alignment notably surpasses both Gemini 2.0 Flash and Midjourney v6.1, highlighting GPT-4o's strong capabilities in preserving content and faithfully rendering diverse styles." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.223, + 0.097, + 0.371, + 0.111 + ], + "angle": 0, + "content": "Prompted Stylization" + }, + { + "type": "text", + "bbox": [ + 0.4, + 0.111, + 0.631, + 0.124 + ], + "angle": 0, + "content": "Evaluation: Consistency/style." + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.125, + 0.348, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.379, + 0.125, + 0.489, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.125, + 0.65, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.125, + 0.798, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.329, + 0.247, + 0.696, + 0.259 + ], + "angle": 0, + "content": "Input Text: \"Generate the Simpsons style of this picture.\"" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.26, + 0.348, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.379, + 0.259, + 0.488, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.26, + 0.651, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.259, + 0.797, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.334, + 0.382, + 0.688, + 0.394 + ], + "angle": 0, + "content": "Input Text: \"Generate the Snoopy style of this picture.\"" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.395, + 0.348, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.379, + 0.395, + 0.487, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.395, + 0.651, + 0.514 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.395, + 0.8, + 0.514 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.334, + 0.516, + 0.684, + 0.528 + ], + "angle": 0, + "content": "Input Text:\"Generate the Disney style of this picture.\"" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.529, + 0.348, + 0.648 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.379, + 0.528, + 0.487, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.528, + 0.651, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.528, + 0.798, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.338, + 0.649, + 0.681, + 0.661 + ], + "angle": 0, + "content": "Input Text: \"Generate the Ghibli style of this picture.\"" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.661, + 0.348, + 0.781 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.661, + 0.487, + 0.78 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.661, + 0.651, + 0.78 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + 
"bbox": [ + 0.665, + 0.662, + 0.8, + 0.78 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.791, + 0.325, + 0.804 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.336, + 0.781, + 0.688, + 0.792 + ], + "angle": 0, + "content": "Input Text: \"Generate the Cubism style of this picture.\"" + }, + { + "type": "text", + "bbox": [ + 0.41, + 0.793, + 0.455, + 0.804 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.793, + 0.633, + 0.804 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.687, + 0.792, + 0.786, + 0.805 + ], + "angle": 0, + "content": "Midjourney v6.1" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.82, + 0.872, + 0.932 + ], + "angle": 0, + "content": "Figure 13: Task: Style transfer, aiming to render an image in a specific artistic style while preserving the original content. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and Midjourney v6.1 [75] on human face style transfer across multiple artistic domains. Observations: GPT-4o exhibits significantly better content preservation compared to Gemini 2.0 Flash and Midjourney v6.1, maintaining fine-grained content details and structural consistency. In terms of style, it faithfully adheres to the textual description, effectively rendering vivid color palettes and soft contours that characterize the target style. This alignment notably surpasses both Gemini 2.0 Flash and Midjourney v6.1 far away, highlighting GPT-4o's strong capabilities in preserving content and faithfully rendering diverse styles." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.283, + 0.108 + ], + "angle": 0, + "content": "2.2.2 Image Editing" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.115, + 0.871, + 0.228 + ], + "angle": 0, + "content": "Image editing involves modifying the visual elements, composition, or data of an image to achieve a desired outcome. This process can range from minor refinements to significant alterations, while maintaining the integrity of the original image. Over time, image editing techniques have evolved from manual, labor-intensive methods to sophisticated AI-driven approaches. Prior works [10, 30, 9, 120, 5, 29, 4, 40] have demonstrated the ability to perform various editing tasks based on textual instructions, such as adding, removing, or replacing objects; altering backgrounds, colors, or styles; and adjusting the number, size, or positions of objects. However, these models still exhibit limitations in certain scenarios, particularly in preserving non-edited regions, maintaining consistent image characteristics, and ensuring seamless blending between edited and non-edited areas." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.232, + 0.871, + 0.333 + ], + "angle": 0, + "content": "We compare GPT-4o with MGIE [30], LEDs++ [9], MagicBrush [120], and Gemini 2.0 Flash [99], which are representative of current SOTA methods. These experiments evaluate GPT-4o's subject preservation and instruction-following capabilities to determine its effectiveness compared with existing methods. Comparative results are shown in Figure 14 through Figure 19. We find that GPT-4o achieves performance comparable to, and in many cases surpassing, SOTA baselines in image editing tasks. 
From these examples, GPT-4o exhibits the fewest failure cases, demonstrating a strong generalization ability across a wide variety of editing tasks. It consistently outperforms baseline models across multiple editing scenarios. We highlight several key observations:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.342, + 0.455, + 0.358 + ], + "angle": 0, + "content": "Strengths of GPT-4o in image editing:" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.361, + 0.868, + 0.404 + ], + "angle": 0, + "content": "- Fine-grained editing: GPT-4o shows a superior ability to handle fine-grained editing tasks. For instance, in example 2 of Figure 14 and example 1 of Figure 15, GPT-4o successfully modified small, detailed objects such as a toothpick and pink ballerina slippers, outperforming prior methods." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.405, + 0.868, + 0.461 + ], + "angle": 0, + "content": "- Substantial image transformations: GPT-4o excels at large-scale edits, such as background changes or object transformations, while maintaining visual coherence and realism. These complex edits require robust contextual and semantic understanding. Example 1 in Figure 16 illustrates GPT-4o's effective handling of a major background alteration task." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.462, + 0.868, + 0.519 + ], + "angle": 0, + "content": "- Subject preservation: GPT-4o demonstrates strong subject-preserving capabilities, avoiding common artifacts such as facial distortions or component loss. In example 2 of Figure 14, GPT-4o retains the content of a drink that Gemini 2.0 Flash erroneously altered. Similarly, in example 5 of Figure 19, GPT-4o best preserves fuselage patterns and textual markings on an airplane." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.52, + 0.868, + 0.604 + ], + "angle": 0, + "content": "- Instruction and original image adherence: GPT-4o shows a notable ability to follow instructions and maintain the structure of the original image, particularly in style editing and tasks involving object quantity, size, or position. This likely stems from its advanced understanding of both the image content and the editing instructions. For example, Figure 18 demonstrates GPT-4o's capability in style translation. Example 2 in Figure 17 shows its understanding of the term \"orange\" in both textual and visual contexts. A similar ability is illustrated in example 4 of Figure 19." + }, + { + "type": "list", + "bbox": [ + 0.201, + 0.361, + 0.868, + 0.604 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.607, + 0.468, + 0.623 + ], + "angle": 0, + "content": "- Limitations of GPT-4o in image editing:" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.627, + 0.868, + 0.669 + ], + "angle": 0, + "content": "- GPT-4o underperforms in scenarios where strict preservation of the original image's lighting, shading, and color tones is required. In such cases, the edited images may exhibit noticeable shifts in visual consistency. This is evident in examples 1 and 5 of Figure 14 and example 4 of Figure 15." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.67, + 0.868, + 0.699 + ], + "angle": 0, + "content": "- In some cases, GPT-4o may fail to retain image details outside the intended edit region. For instance, example 4 in Figure 14 shows a degradation in image quality in non-targeted areas." 
+ }, + { + "type": "list", + "bbox": [ + 0.201, + 0.627, + 0.868, + 0.699 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.711, + 0.871, + 0.782 + ], + "angle": 0, + "content": "In summary, GPT-4o demonstrates substantial advancements in image editing, showing exceptional capabilities in detailed and large-scale edits, subject preservation, and adherence to instructions. While there are limitations in strictly maintaining original image characteristics such as lighting and tonal consistency, GPT-4o significantly reduces failure cases and outperforms existing baselines across a wide range of editing tasks, pushing the boundaries of current SOTA performance." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.222, + 0.102, + 0.324, + 0.119 + ], + "angle": 0, + "content": "Image Editing" + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.135, + 0.358, + 0.151 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.355, + 0.135, + 0.665, + 0.152 + ], + "angle": 0, + "content": "Evaluation: Instruction-following / faithful." + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.16, + 0.338, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.159, + 0.495, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.159, + 0.651, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.16, + 0.806, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.272, + 0.468, + 0.284 + ], + "angle": 0, + "content": "Input Text: \"Add a notebook to the desk.\"" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.289, + 0.338, + 0.399 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.289, + 0.493, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.288, + 0.65, + 0.399 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.289, + 0.806, + 0.399 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.401, + 0.588, + 0.414 + ], + "angle": 0, + "content": "Input Text: \"Put a toothpick in the top of the left sandwich.\"" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.418, + 0.336, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.418, + 0.493, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.418, + 0.649, + 0.527 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.663, + 0.418, + 0.805, + 0.527 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.527, + 0.472, + 0.541 + ], + "angle": 0, + "content": "Input Text: \"Change the goats into moose.\"" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.546, + 0.336, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.546, + 0.495, + 0.656 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.546, + 0.65, + 0.656 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.546, + 0.806, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + 
"bbox": [ + 0.194, + 0.659, + 0.514, + 0.672 + ], + "angle": 0, + "content": "Input Text: \"Replace potatoes with baked beans.\"" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.675, + 0.338, + 0.783 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.675, + 0.495, + 0.784 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.675, + 0.651, + 0.784 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.675, + 0.806, + 0.784 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.787, + 0.568, + 0.801 + ], + "angle": 0, + "content": "Input Text: \"Change the fire hydrant to a parking meter.\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.228, + 0.809, + 0.312, + 0.823 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "image_caption", + "bbox": [ + 0.403, + 0.809, + 0.451, + 0.821 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "image_caption", + "bbox": [ + 0.529, + 0.809, + 0.636, + 0.821 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image_caption", + "bbox": [ + 0.72, + 0.809, + 0.76, + 0.821 + ], + "angle": 0, + "content": "MGIE" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.84, + 0.871, + 0.911 + ], + "angle": 0, + "content": "Figure 14: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: GPT-4o achieves higher success rates than MGIE (examples 2/5) but occasionally alters unintended elements (bread in example 4) or lighting/shading structures (example 5). This likely stems from stronger generalization capacity and creative adaptation focus in training, though reduced fidelity suggests insufficient constraints on structural details during fine-tuning." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.22, + 0.101, + 0.322, + 0.117 + ], + "angle": 0, + "content": "Image Editing" + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.127, + 0.355, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.355, + 0.13, + 0.663, + 0.146 + ], + "angle": 0, + "content": "Evaluation: Instruction-following / faithful." 
+ }, + { + "type": "image", + "bbox": [ + 0.195, + 0.15, + 0.336, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.149, + 0.495, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.149, + 0.653, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.15, + 0.808, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.196, + 0.262, + 0.596, + 0.277 + ], + "angle": 0, + "content": "Input Text: \"Turn everyone shoes into pink ballerina slippers.\"" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.278, + 0.338, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.278, + 0.496, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.278, + 0.651, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.278, + 0.808, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.389, + 0.584, + 0.403 + ], + "angle": 0, + "content": "Input Text: \"Remove the fence from in front of the horses.\"" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.407, + 0.336, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.407, + 0.495, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.406, + 0.655, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.406, + 0.808, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.516, + 0.55, + 0.53 + ], + "angle": 0, + "content": "Input Text: \"Remove the baby elephant in the picture.\"" + }, + { + "type": "image", + "bbox": [ + 0.195, + 0.532, + 0.338, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.533, + 0.495, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.533, + 0.655, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.532, + 0.81, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.644, + 0.547, + 0.658 + ], + "angle": 0, + "content": "Input Text: \"Change the yellow hat into a cowboy hat.\"" + }, + { + "type": "image", + "bbox": [ + 0.195, + 0.66, + 0.338, + 0.768 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.355, + 0.66, + 0.498, + 0.768 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.66, + 0.655, + 0.768 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.66, + 0.808, + 0.77 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.769, + 0.55, + 0.784 + ], + "angle": 0, + "content": "Input Text: \"Remove the people from the background\"." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.223, + 0.788, + 0.306, + 0.803 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "image_caption", + "bbox": [ + 0.398, + 0.789, + 0.447, + 0.801 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "image_caption", + "bbox": [ + 0.526, + 0.789, + 0.632, + 0.801 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image_caption", + "bbox": [ + 0.716, + 0.789, + 0.756, + 0.801 + ], + "angle": 0, + "content": "MGIE" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.821, + 0.872, + 0.906 + ], + "angle": 0, + "content": "Figure 15: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: From examples 1-3, GPT-4o shows higher success in fine detail edits and large-scale edits with occlusions. This likely stems from GPT-4o's stronger contextual understanding and ability to infer missing or obscured elements, enabling more precise localized edits and coherent large-scale modifications even with partial visibility. However, it sometimes erases non-target elements (e.g., the house in example 5) and significantly alters global lighting (example 4)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.222, + 0.101, + 0.324, + 0.117 + ], + "angle": 0, + "content": "Image Editing" + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.126, + 0.355, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.355, + 0.13, + 0.664, + 0.146 + ], + "angle": 0, + "content": "Evaluation: Instruction-following / faithful." + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.152, + 0.339, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.354, + 0.151, + 0.496, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.15, + 0.65, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.667, + 0.15, + 0.808, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.262, + 0.685, + 0.275 + ], + "angle": 0, + "content": "Input Text: \"Change the background to the set of a nickelodeon game show.\"" + }, + { + "type": "image", + "bbox": [ + 0.195, + 0.279, + 0.336, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.279, + 0.495, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.279, + 0.651, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.663, + 0.279, + 0.805, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.391, + 0.483, + 0.405 + ], + "angle": 0, + "content": "Input Text: \"Have the dog prick up its ears.\"" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.412, + 0.334, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.412, + 0.495, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.411, + 0.651, + 0.517 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.411, + 0.806, + 0.518 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.518, + 0.489, + 0.53 + ], + "angle": 0, + "content": "Input Text: \"Have 
the elephant's tail raised.\"" + }, + { + "type": "image", + "bbox": [ + 0.195, + 0.534, + 0.336, + 0.644 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.535, + 0.495, + 0.644 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.535, + 0.649, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.535, + 0.807, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.647, + 0.541, + 0.661 + ], + "angle": 0, + "content": "Input Text: \"Change the background to Vatican City.\"" + }, + { + "type": "image", + "bbox": [ + 0.196, + 0.665, + 0.339, + 0.772 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.355, + 0.664, + 0.496, + 0.772 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.664, + 0.655, + 0.772 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.664, + 0.807, + 0.772 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.775, + 0.552, + 0.789 + ], + "angle": 0, + "content": "Input Text: \"Change the background to Mount Rainier.\"" + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.798, + 0.307, + 0.813 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.4, + 0.798, + 0.448, + 0.811 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.798, + 0.633, + 0.811 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.717, + 0.798, + 0.757, + 0.811 + ], + "angle": 0, + "content": "MGIE" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.836, + 0.87, + 0.907 + ], + "angle": 0, + "content": "Figure 16: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: From Example 1, GPT-4o demonstrates superior performance in style editing, effectively interpreting style instructions and preserving global image structure—a capability lacking in baseline models (MGIE, Gemini 2.0 Flash, and MagicBrush, as will be shown later). This likely stems from its stronger cross-modal comprehension and structural awareness during training." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.225, + 0.102, + 0.326, + 0.119 + ], + "angle": 0, + "content": "Image Editing" + }, + { + "type": "title", + "bbox": [ + 0.336, + 0.128, + 0.666, + 0.148 + ], + "angle": 0, + "content": "Evaluation: Instruction-following / faithful." 
+ }, + { + "type": "image", + "bbox": [ + 0.197, + 0.153, + 0.339, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.153, + 0.495, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.153, + 0.649, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.663, + 0.153, + 0.804, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.198, + 0.264, + 0.53, + 0.277 + ], + "angle": 0, + "content": "Input Text: \"Add a white hat to the woman's head.\"" + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.281, + 0.339, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.355, + 0.281, + 0.496, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.281, + 0.653, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.281, + 0.806, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.2, + 0.391, + 0.594, + 0.404 + ], + "angle": 0, + "content": "Input Text: \"Delete the oranges from the shelf in the image.\"" + }, + { + "type": "image", + "bbox": [ + 0.198, + 0.407, + 0.34, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.355, + 0.406, + 0.496, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.406, + 0.651, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.406, + 0.805, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.199, + 0.515, + 0.641, + 0.529 + ], + "angle": 0, + "content": "Input Text: \"Get rid of the water the elephants are walking through.\"" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.531, + 0.307, + 0.545 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.398, + 0.531, + 0.447, + 0.543 + ], + "angle": 0, + "content": "GPT-40" + }, + { + "type": "text", + "bbox": [ + 0.525, + 0.531, + 0.63, + 0.543 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.698, + 0.531, + 0.766, + 0.543 + ], + "angle": 0, + "content": "LEDS++" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.551, + 0.337, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.551, + 0.493, + 0.658 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.551, + 0.649, + 0.658 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.551, + 0.805, + 0.658 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.198, + 0.66, + 0.483, + 0.674 + ], + "angle": 0, + "content": "Input Text: \"Show the seal raising its head.\"" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.678, + 0.336, + 0.785 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.678, + 0.493, + 0.786 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.678, + 0.65, + 0.785 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.663, + 0.678, + 0.804, + 0.785 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.198, + 0.787, + 0.497, + 0.8 + ], + "angle": 0, + "content": "Input Text: \"Change the sky to 
stars at night.\"" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.802, + 0.31, + 0.816 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.4, + 0.802, + 0.45, + 0.814 + ], + "angle": 0, + "content": "GPT-40" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.802, + 0.633, + 0.814 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.698, + 0.802, + 0.774, + 0.816 + ], + "angle": 0, + "content": "MagicBrush" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.837, + 0.871, + 0.907 + ], + "angle": 0, + "content": "Figure 17: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/LEDITS++ [9]/MagicBrush [120]. Observations: From Examples 2 and 3, GPT-4o demonstrates stronger comprehension of instructions involving 'the oranges on the shelf' and 'the water the elephants are walking through', translating this understanding into more accurate edits. This suggests better grounding of textual prompts in visual context during generation." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.221, + 0.109, + 0.321, + 0.125 + ], + "angle": 0, + "content": "Image Editing" + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.136, + 0.355, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.355, + 0.141, + 0.665, + 0.157 + ], + "angle": 0, + "content": "Evaluation: Instruction-following / faithful." + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.163, + 0.333, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.164, + 0.495, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.164, + 0.651, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.165, + 0.805, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.193, + 0.274, + 0.659, + 0.288 + ], + "angle": 0, + "content": "Input Text: \"Change the image to a 1950s Flintstones cartoon art style.\"" + }, + { + "type": "image", + "bbox": [ + 0.192, + 0.292, + 0.334, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.292, + 0.495, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.292, + 0.65, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.292, + 0.808, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.404, + 0.503, + 0.417 + ], + "angle": 0, + "content": "Input Text: \"Change this into a cubist painting.\"" + }, + { + "type": "image", + "bbox": [ + 0.193, + 0.423, + 0.335, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.423, + 0.493, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.423, + 0.653, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.422, + 0.808, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.193, + 0.535, + 0.684, + 0.547 + ], + "angle": 0, + "content": "Input Text: \"Make the image appear as if it's a woodblock print by Hokusai.\"" + }, + { + "type": "image", + "bbox": [ + 
0.194, + 0.551, + 0.336, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.551, + 0.495, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.551, + 0.653, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.667, + 0.552, + 0.807, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.656, + 0.592, + 0.669 + ], + "angle": 0, + "content": "Input Text: \"Change the background to Fushimi Inari Taisha.\"" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.672, + 0.334, + 0.782 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.673, + 0.493, + 0.781 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.673, + 0.649, + 0.78 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.673, + 0.807, + 0.781 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.193, + 0.783, + 0.603, + 0.798 + ], + "angle": 0, + "content": "Input Text: \"Make the image appear like a Rembrandt painting.\"" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.806, + 0.304, + 0.82 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.397, + 0.806, + 0.445, + 0.817 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.525, + 0.806, + 0.631, + 0.817 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.697, + 0.806, + 0.774, + 0.819 + ], + "angle": 0, + "content": "MagicBrush" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.843, + 0.87, + 0.899 + ], + "angle": 0, + "content": "Figure 18: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MagicBrush [120]. Observations: This set of examples further demonstrates GPT-4o's robust capabilities in style editing and background modification, consistent with the findings previously presented in Figure 16." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.22, + 0.108, + 0.321, + 0.124 + ], + "angle": 0, + "content": "Image Editing" + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.133, + 0.353, + 0.149 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.353, + 0.135, + 0.663, + 0.151 + ], + "angle": 0, + "content": "Evaluation: Instruction-following / faithful." 
+ }, + { + "type": "image", + "bbox": [ + 0.191, + 0.161, + 0.332, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.347, + 0.161, + 0.489, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.162, + 0.65, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.162, + 0.805, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.272, + 0.511, + 0.285 + ], + "angle": 0, + "content": "Input Text: \"Make the image look like a cartoon.\"" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.293, + 0.333, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.294, + 0.491, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.294, + 0.65, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.294, + 0.804, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.401, + 0.645, + 0.414 + ], + "angle": 0, + "content": "Input Text: \"Change the bike frame to be shiny metal instead of red.\"" + }, + { + "type": "image", + "bbox": [ + 0.196, + 0.426, + 0.335, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.357, + 0.426, + 0.496, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.426, + 0.651, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.667, + 0.426, + 0.806, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.534, + 0.557, + 0.547 + ], + "angle": 0, + "content": "Input Text: \"Change the table color from blue to black.\"" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.555, + 0.338, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.356, + 0.556, + 0.496, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.556, + 0.655, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.556, + 0.809, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.659, + 0.536, + 0.673 + ], + "angle": 0, + "content": "Input Text: \"Change the woman's hair to be all blue.\"" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.683, + 0.338, + 0.789 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.356, + 0.683, + 0.497, + 0.79 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.683, + 0.658, + 0.791 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.683, + 0.807, + 0.791 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.2, + 0.792, + 0.603, + 0.805 + ], + "angle": 0, + "content": "Input Text: \"Make the color of the airplane be yellow instead.\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.227, + 0.808, + 0.308, + 0.822 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "image_caption", + "bbox": [ + 0.4, + 0.808, + 0.449, + 0.82 + ], + "angle": 0, + "content": "GPT-40" + }, + { + "type": "image_caption", + "bbox": [ + 0.528, + 0.808, + 0.634, + 0.82 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image_caption", + "bbox": [ + 0.7, + 
0.808, + 0.776, + 0.822 + ], + "angle": 0, + "content": "MagicBrush" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.844, + 0.871, + 0.901 + ], + "angle": 0, + "content": "Figure 19: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MagicBrush [120]. Observations: Example 4 highlights GPT-4o's superior image understanding—accurately distinguishing between hair and a scarf (where MagicBrush fails) to execute the edit. In Example 5, its precise retention of the plane's logo and text further demonstrates robust object-preservation capabilities." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.284, + 0.106 + ], + "angle": 0, + "content": "2.2.3 Customization" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.115, + 0.871, + 0.213 + ], + "angle": 0, + "content": "Customization, also known as subject-driven generation or personalization, aims to enable visual generative models to generate visual concepts from given reference images. Initial methods [31, 91] have achieved this by optimizing text embeddings or model weights. Subsequent approaches [50, 36, 46, 125, 94, 129] expanded on these approaches to handle multiple visual concepts. Customization plays a crucial role in making visual generative models more flexible and applicable across diverse domains. By empowering models to adapt to user-provided inputs, it ensures outputs are tailored to specific visual concepts. This is particularly significant in industries such as artistic creation and advertising, where individualization and creativity are paramount." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.219, + 0.872, + 0.372 + ], + "angle": 0, + "content": "To evaluate the performance of GPT-4o in this challenging task, we collect reference images from previous relevant works [130, 103], and conduct qualitative comparisons as shown in Figure 20 and Figure 21. For single-concept customization, we compare GPT-4o with Gemini 2.0 Flash and DisEnvisioner [130]. The results demonstrate that GPT-4o not only faithfully reproduces the visual concept from the reference image but also accurately adheres to the given textual description. In this task, GPT-4o significantly outperforms Gemini 2.0 Flash and achieves performance on par with the SOTA customization method. However, the images generated by GPT-4o still exhibit some \"copy-paste\" artifacts, leaving room for further improvement in the future. For multi-concept customization, we compare GPT-4o with Gemini 2.0 Flash and MS-Diffusion [103]. In this task, GPT-4o can still achieve competitive results for customizing multiple visual concepts in different contexts. Unfortunately, it struggles with certain unique combinations (e.g., making a dog wear a human dress), which could be attributed to the lack of relevant customization training data." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.377, + 0.871, + 0.421 + ], + "angle": 0, + "content": "Overall, GPT-4o demonstrates impressive performance in both single-concept and multi-concept customization tasks, showcasing strong concept fidelity and great text alignment. Despite some limitations, GPT-4o achieves remarkable results on par with SOTA customization methods and outperforms Gemini 2.0 Flash." 
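Section 2.2.3 opens by noting that early customization methods [31, 91] work by optimizing text embeddings or model weights against the reference images. The toy loop below illustrates only the embedding-optimization idea in the spirit of those methods: the generator stays frozen while a single new concept embedding is fitted to reconstruct features of the references. Everything here (the stand-in generator, the random "reference features", the hyperparameters) is a placeholder for illustration, not any cited method's actual training code.

```python
import torch
import torch.nn as nn

# Toy sketch of embedding-based customization: freeze the generator and
# optimize only the embedding of a new pseudo-token so its renderings
# match features extracted from the reference images. All modules and
# numbers below are illustrative placeholders.

torch.manual_seed(0)
EMB_DIM, FEAT_DIM = 32, 128

generator = nn.Sequential(  # stand-in for a frozen text-to-image model
    nn.Linear(EMB_DIM, 64), nn.ReLU(), nn.Linear(64, FEAT_DIM))
for p in generator.parameters():
    p.requires_grad_(False)  # model weights stay frozen

reference_feats = torch.randn(4, FEAT_DIM)           # stand-in reference features
concept = torch.zeros(EMB_DIM, requires_grad=True)   # the only trainable tensor
opt = torch.optim.Adam([concept], lr=1e-2)

for step in range(200):
    opt.zero_grad()
    rendered = generator(concept.expand(4, -1))      # same concept, 4 "renders"
    loss = ((rendered - reference_feats) ** 2).mean()  # reconstruction loss
    loss.backward()
    opt.step()

print(f"final reconstruction loss: {loss.item():.4f}")
```

The design point this illustrates is why such methods are data-light: only a single embedding vector is learned, so a handful of reference images suffices, at the cost of the "copy-paste" fidelity limits discussed above.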
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.935, + 0.509, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.175, + 0.135, + 0.29, + 0.166 + ], + "angle": 0, + "content": "Customization (Single concept)" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.175, + 0.198, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.2, + 0.18, + 0.797, + 0.199 + ], + "angle": 0, + "content": "Evaluation: Corresponding visual concepts of given reference images." + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.204, + 0.319, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.204, + 0.49, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.204, + 0.663, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.204, + 0.835, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.341, + 0.806, + 0.373 + ], + "angle": 0, + "content": "Input Text: \"A dog on top of a purple rug in a forest, with reference to the attached image.\"" + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.38, + 0.319, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.38, + 0.491, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.38, + 0.663, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.38, + 0.836, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.519, + 0.763, + 0.536 + ], + "angle": 0, + "content": "Input Text: \"A cat wearing a Santa hat, with reference to the attached image.\"" + }, + { + "type": "image", + "bbox": [ + 0.152, + 0.546, + 0.319, + 0.675 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.322, + 0.546, + 0.49, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.546, + 0.663, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.546, + 0.836, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.679, + 0.802, + 0.711 + ], + "angle": 0, + "content": "Input Text: \"A pair of glasses with a tree and autumn leaves in the background, with reference to the attached image.\"" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.719, + 0.285, + 0.736 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.719, + 0.434, + 0.734 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.719, + 0.642, + 0.734 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.7, + 0.719, + 0.804, + 0.733 + ], + "angle": 0, + "content": "DisEnvisioner" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.757, + 0.872, + 0.87 + ], + "angle": 0, + "content": "Figure 20: Task: Single-concept customization. The goal is to generate images that faithfully reproduce a single visual concept from reference images while aligning with a given textual description. Setup: Reference images are collected from prior works [130], and results are compared across GPT-4o, Gemini 2.0 Flash [99], and DisEnvisioner [130]. Each row includes the input reference image, text prompt, and the corresponding outputs. 
Observations: GPT-4o demonstrates strong performance in faithfully reproducing the single visual concept with high fidelity while adhering closely to the given textual description. It consistently outperforms Gemini 2.0 Flash and achieves results comparable to the SOTA method DisEnvisioner. However, some generated images still exhibit minor \"copy-paste\" artifacts, indicating room for further improvement." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.158, + 0.103, + 0.307, + 0.133 + ], + "angle": 0, + "content": "Customization (Multiple concepts)" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.14, + 0.205, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.207, + 0.144, + 0.804, + 0.162 + ], + "angle": 0, + "content": "Evaluation: Corresponding visual concepts of given reference images." + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.169, + 0.288, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.29, + 0.169, + 0.422, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.169, + 0.556, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.556, + 0.169, + 0.692, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.169, + 0.829, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.157, + 0.284, + 0.829, + 0.302 + ], + "angle": 0, + "content": "Input Text: \"A dog wearing a dress in the snow, with reference to the attached images.\"" + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.31, + 0.286, + 0.414 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.287, + 0.31, + 0.42, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.421, + 0.31, + 0.554, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.555, + 0.31, + 0.688, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.31, + 0.823, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.156, + 0.421, + 0.804, + 0.453 + ], + "angle": 0, + "content": "Input Text: \"A flower with a barn in the background, with reference to the attached images.\"" + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.46, + 0.286, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.287, + 0.46, + 0.42, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.421, + 0.46, + 0.554, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.555, + 0.46, + 0.688, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.46, + 0.822, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.157, + 0.568, + 0.781, + 0.601 + ], + "angle": 0, + "content": "Input Text: \"A backpack and a stuffed animal in the jungle, with reference to the attached images.\"" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.606, + 0.259, + 0.621 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.31, + 0.606, + 0.394, + 0.62 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.465, + 0.606, + 0.514, + 
0.619 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.568, + 0.606, + 0.676, + 0.619 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.707, + 0.606, + 0.797, + 0.619 + ], + "angle": 0, + "content": "MS-Diffusion" + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.635, + 0.26, + 0.723 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.263, + 0.635, + 0.375, + 0.723 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.377, + 0.635, + 0.489, + 0.723 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.49, + 0.635, + 0.599, + 0.723 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.6, + 0.635, + 0.713, + 0.723 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.714, + 0.635, + 0.826, + 0.723 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.157, + 0.727, + 0.825, + 0.76 + ], + "angle": 0, + "content": "Input Text: \"A lantern, a clock, and a backpack on a cobblestone street, with reference to the attached images.\"" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.77, + 0.245, + 0.785 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.77, + 0.357, + 0.785 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.386, + 0.771, + 0.47, + 0.785 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.771, + 0.564, + 0.784 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.597, + 0.771, + 0.706, + 0.784 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.722, + 0.771, + 0.813, + 0.784 + ], + "angle": 0, + "content": "MS-Diffusion" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.804, + 0.872, + 0.903 + ], + "angle": 0, + "content": "Figure 21: Task: Multi-concept customization. The goal is to generate images that effectively combine multiple visual concepts from reference images while aligning with a given textual description. Setup: Reference images are collected from prior works [103], and results are compared across GPT-4o, Gemini 2.0 Flash [99], and MS-Diffusion [103]. Each row includes the input reference images, text prompt, and the corresponding outputs. Observations: GPT-4o achieves competitive results in combining multiple visual concepts, showing strong fidelity to individual concepts and alignment with text prompts. However, its performance declines with unique or complex combinations. Despite this, GPT-4o outperforms Gemini 2.0 Flash and achieves results on par with SOTA methods." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.352, + 0.108 + ], + "angle": 0, + "content": "2.2.4 Story Image Generation" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.115, + 0.871, + 0.173 + ], + "angle": 0, + "content": "Story image generation is the task of generating coherent story images from input text narratives. The conditions may also include the first story frame or character images. We choose Gemini 2.0 Flash [99], StoryDiffusion [38], SEED-Story [111], and DiffSensei [108] as baselines, due to their proven ability to generate coherent and expressive story images and their public availability. 
The results are shown in Figure 22 and Figure 23." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.178, + 0.872, + 0.344 + ], + "angle": 0, + "content": "In the first example, GPT-4o and StoryDiffusion successfully generate a three-panel short story about a fisherman, whereas Gemini 2.0 Flash fails by producing a single panel that appears to combine the three story narratives. In the second example, the story narrative is longer, spanning 11 panels. To evaluate this scenario with GPT-4o, we instruct the model to generate story images sequentially—using the input image and all previously generated images along with the corresponding text prompts. As shown in the figure, GPT-4o is capable of generating a long story with consistency. In the final example, we examine a Japanese black-and-white manga style with multiple input character images. GPT-4o is able to generate coherent stories, though it exhibits minor errors in character consistency (notably with the depiction of the woman) and misalignment with the input narrative (the narrative requires 7 panels, but only 6 are generated). The baseline Gemini 2.0 Flash performs worse, failing to preserve character status and the correct number of panels, as it also produces only 6 panels. Conversely, the DiffSensei model demonstrates superior performance, likely due to its specialized design and training for Japanese black-and-white manga generation." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.35, + 0.871, + 0.393 + ], + "angle": 0, + "content": "In conclusion, while GPT-4o achieves comparable performance to current baselines in story image generation, it shows limitations in specific scenarios—such as Japanese black-and-white manga and precise character status preservation—when compared to methods specifically tailored for those tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.17, + 0.144, + 0.362, + 0.16 + ], + "angle": 0, + "content": "Story Image Generation" + }, + { + "type": "title", + "bbox": [ + 0.359, + 0.17, + 0.642, + 0.19 + ], + "angle": 0, + "content": "Evaluation: Subject Consistency." 
+ }, + { + "type": "image", + "bbox": [ + 0.158, + 0.197, + 0.346, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.226, + 0.287, + 0.28, + 0.299 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.197, + 0.465, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.468, + 0.197, + 0.579, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.468, + 0.287, + 0.578, + 0.302 + ], + "angle": 0, + "content": "StoryDiffusion" + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.197, + 0.693, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.197, + 0.839, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.712, + 0.287, + 0.831, + 0.3 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "title", + "bbox": [ + 0.169, + 0.312, + 0.261, + 0.326 + ], + "angle": 0, + "content": "Input Text:" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.327, + 0.319, + 0.34 + ], + "angle": 0, + "content": "\"Draw a story about:" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.34, + 0.533, + 0.354 + ], + "angle": 0, + "content": "An old fisherman in a cable-knit sweater and boots" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.355, + 0.35, + 0.369 + ], + "angle": 0, + "content": "1. Laying out a picnic solo" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.369, + 0.347, + 0.383 + ], + "angle": 0, + "content": "2. Rowing a boat at dawn" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.383, + 0.396, + 0.397 + ], + "angle": 0, + "content": "3. Stargazing with a telescope\"." + }, + { + "type": "list", + "bbox": [ + 0.169, + 0.355, + 0.396, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.409, + 0.349, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.208, + 0.559, + 0.3, + 0.574 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.409, + 0.443, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.466, + 0.453, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.511, + 0.428, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.446, + 0.409, + 0.525, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.446, + 0.466, + 0.548, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.446, + 0.511, + 0.493, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.456, + 0.561, + 0.509, + 0.574 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.409, + 0.608, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.466, + 0.608, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.511, + 0.608, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.625, + 0.412, + 0.673, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.625, + 0.451, + 0.673, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.625, + 0.482, + 0.673, + 0.508 + ], +
"angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.625, + 0.508, + 0.673, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.412, + 0.727, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.46, + 0.728, + 0.508 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.508, + 0.728, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.733, + 0.412, + 0.783, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.733, + 0.46, + 0.783, + 0.508 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.733, + 0.508, + 0.783, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.788, + 0.412, + 0.838, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.788, + 0.46, + 0.838, + 0.508 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.788, + 0.508, + 0.838, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.686, + 0.561, + 0.776, + 0.576 + ], + "angle": 0, + "content": "SEED-Story" + }, + { + "type": "title", + "bbox": [ + 0.169, + 0.584, + 0.261, + 0.598 + ], + "angle": 0, + "content": "Input Text:" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.599, + 0.447, + 0.613 + ], + "angle": 0, + "content": "\"Draw a story about George, a monkey:" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.613, + 0.789, + 0.627 + ], + "angle": 0, + "content": "1. He looked around with a curious expression, wondering what adventures awaited him." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.627, + 0.435, + 0.64 + ], + "angle": 0, + "content": "2. Suddenly, George heard a noise. ..." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.64, + 0.662, + 0.655 + ], + "angle": 0, + "content": "3. To his surprise, the noise was George's friend, a small brown dog ..." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.655, + 0.799, + 0.669 + ], + "angle": 0, + "content": "4. George and the dog then played a game of hide and seek. George hid behind a couch ..." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.67, + 0.643, + 0.684 + ], + "angle": 0, + "content": "5. The next day, George and the dog decided to explore the city ..." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.684, + 0.617, + 0.699 + ], + "angle": 0, + "content": "6. George stopped on the city sidewalk, looking up at the sky ..." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.699, + 0.583, + 0.712 + ], + "angle": 0, + "content": "7. George then noticed a building with a reflective glass ..." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.712, + 0.764, + 0.726 + ], + "angle": 0, + "content": "8. George and the dog stood in front of the building, looking up at the lit windows ..." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.726, + 0.686, + 0.74 + ], + "angle": 0, + "content": "9. They were in a room with a door, waiting for their friend to join them" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.74, + 0.657, + 0.754 + ], + "angle": 0, + "content": "10. Suddenly, the door opened, and a man in a yellow suit walked in ..." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.754, + 0.822, + 0.769 + ], + "angle": 0, + "content": "11. He seemed deep in thought, unaware of George and the dog watching him from below ...\"." 
+ }, + { + "type": "list", + "bbox": [ + 0.169, + 0.599, + 0.822, + 0.769 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.794, + 0.871, + 0.864 + ], + "angle": 0, + "content": "Figure 22: Task: Story image generation. The goal is to generate coherent story sequences based on narrative text, optionally conditioned on initial story frames or character images. Setup: Each example combines an input narrative (and, when available, reference character images) with a series of generated story panels. We compare outputs from GPT-4o against Gemini 2.0 Flash [99], StoryDiffusion [38], and SEED-Story [111]. Observations: GPT-4o exhibits strong narrative coherence and panel continuity, matching or surpassing general baselines." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.169, + 0.222, + 0.361, + 0.239 + ], + "angle": 0, + "content": "Story Image Generation" + }, + { + "type": "title", + "bbox": [ + 0.382, + 0.252, + 0.639, + 0.27 + ], + "angle": 0, + "content": "Evaluation: Subject Consistency." + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.274, + 0.238, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.342, + 0.237, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.409, + 0.238, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.149, + 0.48, + 0.247, + 0.495 + ], + "angle": 0, + "content": "Input Images" + }, + { + "type": "image", + "bbox": [ + 0.254, + 0.277, + 0.42, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.254, + 0.339, + 0.419, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.254, + 0.406, + 0.419, + 0.473 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.31, + 0.48, + 0.364, + 0.492 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "image", + "bbox": [ + 0.427, + 0.275, + 0.609, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.428, + 0.341, + 0.607, + 0.411 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.429, + 0.411, + 0.607, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.451, + 0.48, + 0.57, + 0.493 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image", + "bbox": [ + 0.613, + 0.276, + 0.832, + 0.332 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.613, + 0.333, + 0.831, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.613, + 0.393, + 0.831, + 0.473 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.682, + 0.48, + 0.762, + 0.493 + ], + "angle": 0, + "content": "DiffSensei" + }, + { + "type": "title", + "bbox": [ + 0.159, + 0.513, + 0.249, + 0.526 + ], + "angle": 0, + "content": "Input Text:" + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.528, + 0.833, + 0.555 + ], + "angle": 0, + "content": "\"Please generate a black-and-white manga using the given characters (a young man, a child, and a woman). Each panel may appear 0-3 characters." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.556, + 0.743, + 0.57 + ], + "angle": 0, + "content": "1. 
A man is lying on the floor surrounded by books and papers, with a radio nearby." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.57, + 0.836, + 0.598 + ], + "angle": 0, + "content": "2. A woman with curly hair is smiling. She's wearing a patterned shirt and apron. She's holding a baby." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.599, + 0.79, + 0.612 + ], + "angle": 0, + "content": "3. A man with a surprised expression, his mouth open as if he's about to shout or scream." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.613, + 0.679, + 0.627 + ], + "angle": 0, + "content": "4. A young man with a surprised expression, is holding a baby on his back." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.627, + 0.553, + 0.64 + ], + "angle": 0, + "content": "5. A man is holding a baby. The man's hair is disheveled." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.641, + 0.68, + 0.654 + ], + "angle": 0, + "content": "6. A man with a surprised expression. His eyes wide and eyebrows raised." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.655, + 0.825, + 0.669 + ], + "angle": 0, + "content": "7. A man carrying a child on his back walk up a staircase. The man is wearing a stripped shirt\"." + }, + { + "type": "list", + "bbox": [ + 0.159, + 0.556, + 0.836, + 0.669 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.702, + 0.871, + 0.786 + ], + "angle": 0, + "content": "Figure 23: Task: Story image generation. The goal is to generate coherent story sequences based on narrative text, optionally conditioned on initial story frames or character images. Setup: Each example combines an input narrative (and, when available, reference character images) with a series of generated story panels. We compare outputs from GPT-4o against baselines including Gemini 2.0 Flash [99] and DiffSensei [108]. Observations: GPT-4o shows minor shortcomings in precise character consistency and panel count in specialized contexts, such as Japanese black-and-white manga, where dedicated models like DiffSensei deliver superior performance." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.296, + 0.105 + ], + "angle": 0, + "content": "2.2.5 Low-level Vision" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.115, + 0.87, + 0.241 + ], + "angle": 0, + "content": "Low-level vision tasks aim to enhance the basic quality or detail of visual content by improving various aspects of an image. Initial methods often focused on optimizing single tasks, such as super-resolution [88, 95], denoising [61, 63, 55], restoration [60, 20, 62, 84, 15, 16, 17], color adjustment [59], and more [22, 66, 116, 1, 122]. As the technology progressed, subsequent approaches expanded these techniques to handle multiple low-level tasks simultaneously, which is called universal image restoration. Low-level tasks play a critical role in image generation and editing, allowing visual generative models to provide higher-quality outputs in real-world applications. By enabling models to adapt to diverse inputs, they ensure that the generated images perform well across different visual tasks. This is especially important in areas such as image restoration and video enhancement, where high-precision visual content optimization is crucial, such as in film post-production and autonomous driving." 
+ }, + { + "type": "text", + "bbox": [ + 0.125, + 0.246, + 0.869, + 0.372 + ], + "angle": 0, + "content": "We evaluate the performance of GPT-4o on these challenging tasks. First, for image restoration tasks such as super-resolution, denoising, deraining, low-light enhancement, deblurring, and dehazing, we collect reference images from previous relevant works and compare GPT-4o against Gemini 2.0 Flash and a universal image restoration model, InstructIR [20], as shown in Figures 24, 25, 26, 27, 28, 29, 33, 34. In most scenarios, GPT-4o guarantees high-quality output images, outperforming Gemini 2.0 Flash. However, there are still some degradation issues that are difficult to remove, as seen in the second image of the image denoising task. On the other hand, for low-level image restoration tasks, maintaining pixel consistency between the output and input images is crucial. GPT-4o does not perform well in this regard, as the content of many images changes. In contrast, InstructIR, designed specifically for image restoration, performs better, effectively removing degradation while maintaining pixel consistency throughout." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.377, + 0.869, + 0.502 + ], + "angle": 0, + "content": "For image inpainting and outpainting, shown in Figures 30 and 31, we compare GPT-4o and Gemini 2.0 Flash with the latest inpainting and outpainting methods [66, 116, 22, 1]. Only the missing regions need to be completed, yet GPT-4o still alters content that should remain untouched. Although the output image quality is higher, this is not ideal for evaluating the task itself. For human face inpainting, the overall artistic style of GPT-4o's results is more natural compared to the other two methods. For colorization, we choose the latest colorization model, CtrlColor [59]; the overall style of the results in Figure 32 is somewhat dark. Compared to Gemini 2.0 Flash, GPT-4o's colors are more natural and consistent with the style. However, there are some inaccuracies in color control. For example, in the second image, the cat's color is not white as specified in the text. Additionally, GPT-4o still exhibits issues with changes in image content, such as the shape of the person's face in the fourth image." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.508, + 0.869, + 0.619 + ], + "angle": 0, + "content": "For the image re-lighting task in Figure 35, GPT-4o performs well in applying realistic lighting and shadows, with natural color tones that match the scene. However, it occasionally struggles with maintaining light consistency, particularly in complex lighting scenarios, such as neon or vibrant lights. Compared to Gemini 2.0 Flash, GPT-4o produces more natural and consistent results, but it doesn't always accurately replicate the lighting effects, as seen in the second image, where the neon lighting could have been better captured. IC-Light [122] is effective in applying realistic lighting, but tends to lose detail in some complex objects or faces under different light conditions. Overall, GPT-4o is a strong contender for the image re-lighting task, providing good light consistency but leaving room for improvement in some specific scenarios." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.625, + 0.869, + 0.696 + ], + "angle": 0, + "content": "In summary, GPT-4o demonstrates strong performance in various low-level vision tasks, often surpassing Gemini 2.0 Flash in output quality with more natural and visually appealing results. 
However, it struggles with maintaining pixel consistency and avoiding undesired changes to image content, which are critical for tasks like restoration and inpainting. While its adaptability and realism are impressive, there is room for improvement in precision and task-specific consistency compared to specialized models like InstructIR and IC-Light." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.218, + 0.099, + 0.29, + 0.114 + ], + "angle": 0, + "content": "Denoising" + }, + { + "type": "image", + "bbox": [ + 0.386, + 0.119, + 0.405, + 0.137 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.406, + 0.123, + 0.602, + 0.138 + ], + "angle": 0, + "content": "Evaluation: Image Quality." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.144, + 0.331, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.144, + 0.493, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.144, + 0.657, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.144, + 0.821, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.263, + 0.332, + 0.418 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.263, + 0.493, + 0.419 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.263, + 0.658, + 0.418 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.667, + 0.263, + 0.821, + 0.419 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.425, + 0.332, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.425, + 0.493, + 0.528 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.425, + 0.658, + 0.528 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.667, + 0.425, + 0.821, + 0.528 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.534, + 0.332, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.534, + 0.493, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.534, + 0.658, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.667, + 0.534, + 0.821, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.633, + 0.332, + 0.762 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.633, + 0.493, + 0.762 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.633, + 0.658, + 0.762 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.667, + 0.633, + 0.821, + 0.762 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.765, + 0.574, + 0.78 + ], + "angle": 0, + "content": "Input Text: \"Remove the noise, make the image clear.\"" + }, + { + "type": "text", + "bbox": [ + 0.208, + 0.786, + 0.3, + 0.801 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.386, + 0.785, + 0.438, + 0.797 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.786, + 0.639, + 0.799 +
], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.701, + 0.785, + 0.782, + 0.797 + ], + "angle": 0, + "content": "InstructIR" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.838, + 0.872, + 0.922 + ], + "angle": 0, + "content": "Figure 24: Task: image denoising, aiming to remove noise and obtain a high-quality, clear version. Setup: We compare GPT-4o with InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the denoised images. Observations: GPT-4o can restore high-quality denoised images. Except for the second image, where the noise cannot be completely removed, the other images are free from noise. However, for low-level tasks, GPT-4o does not maintain content consistency well — the background colors and object shapes in many images have changed, such as the background color in the first image and the floor in the fourth image." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.197, + 0.191, + 0.272, + 0.208 + ], + "angle": 0, + "content": "Deraining" + }, + { + "type": "title", + "bbox": [ + 0.388, + 0.215, + 0.623, + 0.233 + ], + "angle": 0, + "content": "Evaluation: Image Quality." + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.241, + 0.326, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.241, + 0.498, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.241, + 0.672, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.241, + 0.846, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.336, + 0.325, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.336, + 0.498, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.336, + 0.672, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.336, + 0.846, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.431, + 0.326, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.431, + 0.498, + 0.517 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.431, + 0.672, + 0.518 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.431, + 0.846, + 0.518 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.523, + 0.326, + 0.631 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.523, + 0.498, + 0.631 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.523, + 0.672, + 0.631 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.523, + 0.846, + 0.632 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.173, + 0.634, + 0.585, + 0.651 + ], + "angle": 0, + "content": "Input Text: \"Remove the rain, make the image clear.\"" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.662, + 0.29, + 0.679 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.386, + 0.663, + 0.444, + 0.677 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.663, + 0.649, +
0.677 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.717, + 0.663, + 0.804, + 0.677 + ], + "angle": 0, + "content": "InstructIR" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.705, + 0.872, + 0.817 + ], + "angle": 0, + "content": "Figure 25: Task: image deraining, aiming to remove rain streaks and obtain a high-quality, clear version. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the derained images. Observations: The overall performance of GPT-4o is good. However, the model struggles with maintaining content consistency in low-level visual details — for instance, the polar bear's background in the first image becomes unnaturally pink, and the underwater scene loses depth and clarity. The flowers also appear altered in color and arrangement. In contrast, InstructIR demonstrates the most consistent performance across all examples, effectively removing rain while preserving the original scene's structure, color, and composition. Overall, InstructIR is the most balanced and accurate model for image restoration in this comparison." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.198, + 0.216, + 0.271, + 0.232 + ], + "angle": 0, + "content": "Dehazing" + }, + { + "type": "title", + "bbox": [ + 0.382, + 0.234, + 0.616, + 0.255 + ], + "angle": 0, + "content": "Evaluation: Image Quality." + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.259, + 0.321, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.334, + 0.26, + 0.49, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.26, + 0.66, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.26, + 0.832, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.364, + 0.321, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.364, + 0.49, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.364, + 0.659, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.364, + 0.832, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.479, + 0.321, + 0.602 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.479, + 0.49, + 0.603 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.479, + 0.659, + 0.603 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.479, + 0.832, + 0.602 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.606, + 0.714, + 0.623 + ], + "angle": 0, + "content": "Input Text: \"I took this photo during a foggy day. 
Can you improve it?\"" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.63, + 0.294, + 0.646 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.38, + 0.631, + 0.437, + 0.644 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.631, + 0.642, + 0.644 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.71, + 0.631, + 0.797, + 0.644 + ], + "angle": 0, + "content": "InstructIR" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.681, + 0.871, + 0.792 + ], + "angle": 0, + "content": "Figure 26: Task: image dehazing, aiming to remove haze and obtain a high-quality, clear version. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the dehazed images. Observations: GPT-4o performs moderately well in dehazing, managing to restore clearer structures and contrast in most scenes. However, its outputs often have a grayish or desaturated tone, especially visible in the second and third rows. Gemini 2.0 Flash produces more colorful results but tends to leave some haze behind, leading to a less crisp output. InstructIR outperforms both, offering the most visually natural and sharp dehazing across all examples while preserving original colors and details. Overall, InstructIR demonstrates the strongest capability in removing haze while maintaining realism." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.149, + 0.265, + 0.332, + 0.283 + ], + "angle": 0, + "content": "Low-light Enhancement" + }, + { + "type": "title", + "bbox": [ + 0.371, + 0.293, + 0.588, + 0.314 + ], + "angle": 0, + "content": "Evaluation: Consistency." + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.32, + 0.321, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.32, + 0.49, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.319, + 0.659, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.319, + 0.829, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.411, + 0.321, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.411, + 0.49, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.411, + 0.659, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.411, + 0.829, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.501, + 0.321, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.334, + 0.5, + 0.49, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.5, + 0.659, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.5, + 0.829, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.587, + 0.792, + 0.605 + ], + "angle": 0, + "content": "Input Text: \"I took My image is too dark, I cannot see anything. 
Can you fix it?\"" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.617, + 0.293, + 0.634 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.383, + 0.617, + 0.44, + 0.631 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.617, + 0.646, + 0.632 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.71, + 0.617, + 0.798, + 0.631 + ], + "angle": 0, + "content": "InstructIR" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.662, + 0.871, + 0.747 + ], + "angle": 0, + "content": "Figure 27: Task: low-light image enhancement, aiming to increase the brightness of the image to obtain a well-lit result. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the brightened images. Observations: In low-light enhancement tasks, GPT-4o can brighten images and recover basic visibility, but often introduces unnatural lighting and loses detail, especially in the second row, where the image remains overly dark. InstructIR consistently delivers the most balanced results, enhancing visibility while preserving true colors and textures, making it the best performer across all three examples." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.197, + 0.233, + 0.275, + 0.25 + ], + "angle": 0, + "content": "Deblurring" + }, + { + "type": "title", + "bbox": [ + 0.379, + 0.258, + 0.615, + 0.279 + ], + "angle": 0, + "content": "Evaluation: Image Quality." + }, + { + "type": "image", + "bbox": [ + 0.156, + 0.285, + 0.324, + 0.383 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.329, + 0.285, + 0.495, + 0.382 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.285, + 0.667, + 0.382 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.285, + 0.838, + 0.383 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.157, + 0.385, + 0.323, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.329, + 0.385, + 0.495, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.385, + 0.667, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.385, + 0.838, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.157, + 0.478, + 0.324, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.329, + 0.478, + 0.495, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.478, + 0.667, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.478, + 0.838, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.162, + 0.57, + 0.794, + 0.603 + ], + "angle": 0, + "content": "Input Text: \"I took this photo while I was running, can you stabilize the image? 
it is too blurry.\"" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.614, + 0.292, + 0.632 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.38, + 0.614, + 0.438, + 0.629 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.614, + 0.646, + 0.629 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.709, + 0.614, + 0.799, + 0.629 + ], + "angle": 0, + "content": "InstructIR" + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.661, + 0.872, + 0.774 + ], + "angle": 0, + "content": "Figure 28: Task: image deblurring, aiming to remove blur and obtain a clear image. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the deblurred images. Observations: For motion deblurring, GPT-4o recovers some sharpness, especially in fine details like text or faces, but the content does not match the original image. Gemini 2.0 Flash sharpens the image slightly better in some cases but can introduce over-smoothing, making the result look artificial. InstructIR demonstrates the best deblurring performance overall — restoring clear edges, facial features, and text while maintaining natural textures. It consistently produces the most stable and visually convincing results across all examples." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.171, + 0.231, + 0.307, + 0.248 + ], + "angle": 0, + "content": "Super-Resolution" + }, + { + "type": "title", + "bbox": [ + 0.379, + 0.246, + 0.616, + 0.268 + ], + "angle": 0, + "content": "Evaluation: Image Quality." + }, + { + "type": "image", + "bbox": [ + 0.168, + 0.273, + 0.33, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.273, + 0.497, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.273, + 0.667, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.675, + 0.273, + 0.836, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.169, + 0.403, + 0.327, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.403, + 0.496, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.403, + 0.664, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.403, + 0.834, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.169, + 0.509, + 0.328, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.509, + 0.497, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.509, + 0.665, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.675, + 0.509, + 0.834, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.604, + 0.813, + 0.636 + ], + "angle": 0, + "content": "Input Text: \"Make my photo bigger and better. Add details to this image. 
Increase the resolution of this photo.\"" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.646, + 0.296, + 0.663 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.389, + 0.646, + 0.446, + 0.66 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.646, + 0.647, + 0.661 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.701, + 0.646, + 0.788, + 0.661 + ], + "angle": 0, + "content": "InstructIR" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.693, + 0.872, + 0.779 + ], + "angle": 0, + "content": "Figure 29: Task: image super-resolution, aiming to improve the image resolution. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the super-resolved images. Observations: In super-resolution, InstructIR delivers the most natural and detailed results across all examples—restoring fine edges in the card reader, realistic texture on the octopus, and sharp trees in the landscape. GPT-4o enhances clarity but misses details like the octopus surface and tree leaves. Gemini 2.0 Flash produces sharper outputs than GPT-4o but introduces unnatural textures and artifacts, especially in organic regions like the octopus and foliage." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.195, + 0.189, + 0.278, + 0.206 + ], + "angle": 0, + "content": "Inpainting" + }, + { + "type": "title", + "bbox": [ + 0.379, + 0.206, + 0.616, + 0.226 + ], + "angle": 0, + "content": "Evaluation: Image Quality." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.235, + 0.323, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.235, + 0.485, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.236, + 0.653, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.237, + 0.824, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.35, + 0.714, + 0.367 + ], + "angle": 0, + "content": "Input Text: \"Please inpainting the image, make it looks reasonable.\"" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.371, + 0.315, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.373, + 0.477, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.373, + 0.655, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.373, + 0.822, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.479, + 0.716, + 0.495 + ], + "angle": 0, + "content": "Input Text: \"Please inpainting the image, make it looks reasonable.\"" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.505, + 0.317, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.505, + 0.476, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.506, + 0.653, + 0.617 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.505, + 0.822, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.619, + 0.831, + 0.65 + ], + "angle": 0, + "content": "Input Text: \"Inpaint the missing part of 
the face in the image, making the restored area look natural and seamless.\"" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.662, + 0.3, + 0.679 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.383, + 0.662, + 0.441, + 0.677 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.663, + 0.645, + 0.678 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.713, + 0.662, + 0.804, + 0.676 + ], + "angle": 0, + "content": "LatentPaint" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.709, + 0.87, + 0.82 + ], + "angle": 0, + "content": "Figure 30: Task: Image inpainting, aiming to restore missing or masked regions in an image to appear natural and consistent with the context. Setup: We compare GPT-4o with baselines such as Gemini 2.0 Flash [99] and LatentPaint [22], evaluating their ability to fill in masked regions realistically. Observations: GPT-4o produces plausible completions but often lacks fine structure and texture alignment—e.g., the bricks in the first row appear flat and misaligned. Gemini 2.0 Flash generates more visually coherent textures, especially in natural scenes like the second row, but can introduce slight over-smoothing. LatentPaint performs the best, accurately reconstructing facial details and complex textures such as hair and expression in the third row, demonstrating superior semantic understanding and visual consistency." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.947 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.235, + 0.098, + 0.315, + 0.113 + ], + "angle": 0, + "content": "Outpainting" + }, + { + "type": "image", + "bbox": [ + 0.406, + 0.114, + 0.423, + 0.129 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.425, + 0.117, + 0.606, + 0.132 + ], + "angle": 0, + "content": "Evaluation: Image Quality." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.136, + 0.348, + 0.241 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.137, + 0.5, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.137, + 0.649, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.138, + 0.797, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.244, + 0.792, + 0.297 + ], + "angle": 0, + "content": "Input Text: \"Inpainting this image: a classic dark brown leather Chesterfield loveseat with tufted detailing and rolled arms. It sits in a cozy, traditionally styled living room with green walls, framed artwork, and warm lighting, creating an elegant and vintage atmosphere.\"" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.299, + 0.351, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.299, + 0.5, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.299, + 0.65, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.659, + 0.299, + 0.796, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.405, + 0.78, + 0.445 + ], + "angle": 0, + "content": "Input Text: \"Extend the image to the left and right with a realistic continuation of the street, sidewalk, and background buildings. 
Maintain consistent lighting, shadows, and overall style.\"" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.446, + 0.351, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.446, + 0.5, + 0.55 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.446, + 0.649, + 0.55 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.659, + 0.446, + 0.796, + 0.55 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.552, + 0.772, + 0.593 + ], + "angle": 0, + "content": "Input Text: \"Extend the image to the left and right, filling the black areas with a natural continuation of the snowy mountain landscape, ski path, trees, and sky. Keep the lighting, shadows, and textures consistent with the original image.\"" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.594, + 0.351, + 0.698 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.594, + 0.5, + 0.698 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.594, + 0.649, + 0.698 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.594, + 0.796, + 0.698 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.7, + 0.763, + 0.754 + ], + "angle": 0, + "content": "Input Text: \"Outpaint the center of this panoramic image to naturally connect the left and right desert landscape. Fill the middle area with a realistic continuation of the rocky desert terrain and blue sky with clouds, ensuring seamless blending and consistent perspective.\"" + }, + { + "type": "text", + "bbox": [ + 0.243, + 0.758, + 0.329, + 0.772 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.407, + 0.759, + 0.456, + 0.771 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.524, + 0.759, + 0.633, + 0.771 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.693, + 0.759, + 0.768, + 0.771 + ], + "angle": 0, + "content": "Dream 360" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.79, + 0.871, + 0.928 + ], + "angle": 0, + "content": "Figure 31: Task: Image outpainting, aiming to extend the visual content of an image beyond its original boundaries coherently and realistically. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and several specialized outpainting methods (SGT+ [116], StrDiffusion [66], and Dream360 [1]), evaluating their ability to extend content while maintaining visual consistency in lighting, texture, and semantics. Observations: The specialized outpainting methods consistently produce the most coherent extensions — for example, they accurately maintain the room's lighting and decor in the first row, continue architectural lines and street perspective in the second, and create seamless snowy landscapes in the third. GPT-4o offers plausible structure but often lacks fine detail and texture continuity, such as mismatched snow gradients or missing shadows. Gemini 2.0 Flash performs slightly better in semantic extension than GPT-4o but can introduce lighting inconsistencies and abrupt transitions, particularly in wide scenes like the desert in the final row."
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.19, + 0.157, + 0.284, + 0.171 + ], + "angle": 0, + "content": "Colorization" + }, + { + "type": "title", + "bbox": [ + 0.371, + 0.175, + 0.608, + 0.194 + ], + "angle": 0, + "content": "Evaluation: Image Quality." + }, + { + "type": "image", + "bbox": [ + 0.157, + 0.2, + 0.318, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.327, + 0.2, + 0.488, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.2, + 0.659, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.2, + 0.837, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.167, + 0.29, + 0.692, + 0.307 + ], + "angle": 0, + "content": "Input Text: \"Colorize it: a red car parked on a cobblestone street.\"" + }, + { + "type": "image", + "bbox": [ + 0.158, + 0.315, + 0.317, + 0.399 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.328, + 0.316, + 0.489, + 0.399 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.316, + 0.659, + 0.399 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.315, + 0.836, + 0.399 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.167, + 0.404, + 0.801, + 0.436 + ], + "angle": 0, + "content": "Input Text: \"Colorize it: a couple of white and black kittens that are sitting in the purple grass.\"" + }, + { + "type": "image", + "bbox": [ + 0.158, + 0.438, + 0.317, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.328, + 0.438, + 0.488, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.438, + 0.659, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.438, + 0.836, + 0.522 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.167, + 0.524, + 0.736, + 0.539 + ], + "angle": 0, + "content": "Input Text: \"Colorize it: a red sports car parked on the side of a street.\"" + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.544, + 0.321, + 0.667 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.544, + 0.492, + 0.666 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.544, + 0.663, + 0.666 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.544, + 0.841, + 0.666 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.669, + 0.77, + 0.686 + ], + "angle": 0, + "content": "Input Text: \"Colorize it: a woman wearing a yellow sunglasses with green lips\"" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.694, + 0.289, + 0.71 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.378, + 0.694, + 0.435, + 0.707 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.694, + 0.642, + 0.707 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.726, + 0.694, + 0.798, + 0.707 + ], + "angle": 0, + "content": "CtrlColor" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.741, + 0.87, + 0.852 + ], + "angle": 0, + "content": "Figure 32: Task: Image 
colorization, aiming to add realistic and semantically consistent color to grayscale images based on textual prompts. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and CtrlColor [59], focusing on their ability to follow instructions and produce visually natural colorized outputs. Observations: CtrlColor performs the best overall, generating vivid and accurate colors that precisely match the prompts—such as green lips and yellow sunglasses in the last row, or the purple grass and kitten hues in the second. GPT-4o provides reasonably faithful colorization but often lacks richness or misinterprets tones (e.g., slightly dull red in the third row or inconsistent purple grass). Gemini 2.0 Flash is more vivid than GPT-4o but tends to oversaturate or produce stylized effects, especially on human features." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.171, + 0.154, + 0.302, + 0.168 + ], + "angle": 0, + "content": "Shadow Removal" + }, + { + "type": "image", + "bbox": [ + 0.386, + 0.175, + 0.408, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.41, + 0.179, + 0.623, + 0.195 + ], + "angle": 0, + "content": "Evaluation: Image Quality." + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.199, + 0.333, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.199, + 0.501, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.199, + 0.678, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.199, + 0.851, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.303, + 0.331, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.303, + 0.506, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.303, + 0.676, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.681, + 0.303, + 0.849, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.402, + 0.331, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.334, + 0.402, + 0.506, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.402, + 0.676, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.681, + 0.402, + 0.849, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.504, + 0.334, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.504, + 0.506, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.504, + 0.677, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.681, + 0.504, + 0.848, + 0.606 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.61, + 0.828, + 0.658 + ], + "angle": 0, + "content": "Input Text: \"Remove all harsh shadows from the image. Make the lighting even and soft across the entire scene. Preserve all objects, colors, and details exactly as they are. 
Make it look like it was taken under diffuse studio lighting.\"" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.674, + 0.301, + 0.692 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.675, + 0.441, + 0.689 + ], + "angle": 0, + "content": "GPT40" + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.675, + 0.652, + 0.689 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.711, + 0.675, + 0.83, + 0.689 + ], + "angle": 0, + "content": "ShadowRefiner" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.727, + 0.872, + 0.854 + ], + "angle": 0, + "content": "Figure 33: Task: Shadow removal, aiming to eliminate harsh shadows while preserving the integrity of the scene, textures, and lighting balance. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and ShadowRefiner [25] to evaluate how well each method removes shadows and retains original object fidelity and lighting consistency. Observations: ShadowRefiner consistently achieves the most natural and effective shadow removal. It produces even, diffuse lighting across all scenes—e.g., softening shadows without distorting textures in complex scenes like the miniatures and dog portrait. Gemini 2.0 Flash removes shadows reasonably but occasionally leaves faint traces or flattens contrast, as seen in the second and fourth rows. GPT-4o shows stronger shadow reduction than Gemini 2.0 Flash but sometimes alters surface brightness or loses detail fidelity. ShadowRefiner best preserves the original color tones and textures while eliminating harsh shadows." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "44" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.16, + 0.099, + 0.313, + 0.113 + ], + "angle": 0, + "content": "Reflection Removal" + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.127, + 0.395, + 0.146 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.396, + 0.132, + 0.609, + 0.148 + ], + "angle": 0, + "content": "Evaluation: Image Quality." 
+ }, + { + "type": "image", + "bbox": [ + 0.169, + 0.157, + 0.323, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.158, + 0.492, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.158, + 0.663, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.158, + 0.831, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.258, + 0.812, + 0.29 + ], + "angle": 0, + "content": "Input Text: \"Remove window reflections, preserve interior details clearly visible through the glass, maintain natural lighting and perspective, photo-realistic result.\"" + }, + { + "type": "image", + "bbox": [ + 0.169, + 0.293, + 0.323, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.293, + 0.491, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.293, + 0.662, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.293, + 0.831, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.396, + 0.842, + 0.429 + ], + "angle": 0, + "content": "Input Text: \"Remove the reflection of buildings on the wet ground surface, make it look like a clean and dry textured concrete floor, realistic lighting and natural color tones.\"" + }, + { + "type": "image", + "bbox": [ + 0.169, + 0.432, + 0.323, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.432, + 0.491, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.432, + 0.662, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.432, + 0.831, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.166, + 0.549, + 0.841, + 0.582 + ], + "angle": 0, + "content": "Input Text: \"Remove reflections from the glass doors, make the interior clearly visible with natural lighting and sharp details, keep the golden door frame realistic and intact.\"" + }, + { + "type": "image", + "bbox": [ + 0.17, + 0.587, + 0.323, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.587, + 0.491, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.587, + 0.662, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.587, + 0.831, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.684, + 0.811, + 0.73 + ], + "angle": 0, + "content": "Input Text: \"Remove reflections from the car window, make the interior of the vehicle clearly visible, preserve natural lighting and realistic textures, keep the car frame untouched.\"" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.742, + 0.292, + 0.758 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.742, + 0.439, + 0.756 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.741, + 0.651, + 0.756 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.735, + 0.741, + 0.78, + 0.756 + ], + "angle": 0, + "content": "DSIT" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.787, + 0.872, + 0.913 + ], + "angle": 0, + "content": "Figure 34: Task: Reflection 
removal, aiming to eliminate unwanted reflections from transparent or reflective surfaces while preserving original content and realistic lighting. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and DSIT [39], assessing their ability to remove reflections while maintaining scene realism, texture fidelity, and lighting consistency. Observations: DSIT shows the most effective and natural reflection removal across all examples. It restores interior visibility through windows (e.g., bed and car interior) while preserving lighting and geometry. Gemini 2.0 Flash removes some reflections but often leaves faded traces or dulls textures, especially on glass doors and wet pavement. GPT-4o performs better than Gemini 2.0 Flash in preserving background details but sometimes alters color tones and sharpness. Overall, DSIT provides the cleanest and most photorealistic results, especially for transparent surfaces like glass and reflective wet ground." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "45" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.217, + 0.102, + 0.35, + 0.117 + ], + "angle": 0, + "content": "Image Re-lighting" + }, + { + "type": "image", + "bbox": [ + 0.404, + 0.119, + 0.424, + 0.135 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.426, + 0.122, + 0.633, + 0.137 + ], + "angle": 0, + "content": "Evaluation: Light consistency." + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.135, + 0.318, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.326, + 0.137, + 0.429, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.441, + 0.137, + 0.555, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.564, + 0.137, + 0.678, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.137, + 0.789, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.219, + 0.449, + 0.233 + ], + "angle": 0, + "content": "Input Text: \"Given two input images:" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.233, + 0.546, + 0.245 + ], + "angle": 0, + "content": "Image 1: A classical marble statue in neutral lighting." + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.245, + 0.657, + 0.258 + ], + "angle": 0, + "content": "Image 2: A city street at night illuminated by neon pink and blue lights." + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.257, + 0.773, + 0.281 + ], + "angle": 0, + "content": "Please generate a relit version of the statue from Image 1, as if it were lit by the lighting conditions of Image 2." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.28, + 0.774, + 0.307 + ], + "angle": 0, + "content": "The result should preserve the details and pose of the statue but apply realistic colored lighting and shadows consistent with the vibrant, mixed neon lighting of the second image."
+ }, + { + "type": "text", + "bbox": [ + 0.228, + 0.31, + 0.294, + 0.324 + ], + "angle": 0, + "content": "Light Map" + }, + { + "type": "text", + "bbox": [ + 0.336, + 0.311, + 0.415, + 0.324 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.474, + 0.311, + 0.52, + 0.323 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.572, + 0.312, + 0.664, + 0.323 + ], + "angle": 0, + "content": "Gemini Pro 2.0" + }, + { + "type": "text", + "bbox": [ + 0.713, + 0.312, + 0.768, + 0.325 + ], + "angle": 0, + "content": "IC-Light" + }, + { + "type": "title", + "bbox": [ + 0.21, + 0.342, + 0.43, + 0.357 + ], + "angle": 0, + "content": "Text-Prompt Image Re-lighting" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.371, + 0.341, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.357, + 0.371, + 0.484, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.372, + 0.638, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.372, + 0.788, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.491, + 0.758, + 0.518 + ], + "angle": 0, + "content": "Input Text: \"Sunlight through the blinds, near window blinds with a reasonable background.\"" + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.519, + 0.336, + 0.627 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.37, + 0.519, + 0.482, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.519, + 0.636, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.519, + 0.788, + 0.627 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.629, + 0.762, + 0.644 + ], + "angle": 0, + "content": "Input Text: \"Sunlight from the left side, beach with a reasonable background.\"" + }, + { + "type": "image", + "bbox": [ + 0.21, + 0.647, + 0.343, + 0.755 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.358, + 0.647, + 0.49, + 0.755 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.647, + 0.638, + 0.755 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.647, + 0.789, + 0.755 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.756, + 0.763, + 0.771 + ], + "angle": 0, + "content": "Input Text: \"Sunlight from the left side, beach with a reasonable background.\"" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.779, + 0.319, + 0.793 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.405, + 0.779, + 0.454, + 0.791 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.779, + 0.631, + 0.792 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.7, + 0.778, + 0.759, + 0.793 + ], + "angle": 0, + "content": "IC-Light" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.82, + 0.871, + 0.932 + ], + "angle": 0, + "content": "Figure 35: Task: Image relighting, aiming to modify the lighting of a given image based on either a reference light map or a textual description, while preserving identity, texture, and spatial consistency.
Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and IC-Light [122] on two subtasks: reference-based and text-based relighting. Evaluations focus on lighting realism, directionality, shadow accuracy, and semantic preservation. Observations: IC-Light achieves the most realistic and consistent relighting across both tasks—accurately applying neon lighting from a reference image and generating sharp shadows and natural light from text prompts. Gemini 2.0 Flash preserves content well but produces softer, less directional lighting. GPT-4o offers more vivid lighting than Gemini 2.0 Flash but sometimes lacks shadow accuracy or background coherence." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.937, + 0.509, + 0.948 + ], + "angle": 0, + "content": "46" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.29, + 0.106 + ], + "angle": 0, + "content": "2.2.6 Spatial Control" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.115, + 0.871, + 0.173 + ], + "angle": 0, + "content": "Spatial control aims to generate visual outputs that not only reflect the content described in the prompt, but also precisely adhere to additional structural conditions (e.g., canny edge maps, depth maps, sketches, poses, and masks). This task evaluates a model's ability to faithfully align text guidance with visual constraints—an essential capability for real-world creative applications such as illustration, animation, digital content creation, and visual storytelling." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.178, + 0.872, + 0.234 + ], + "angle": 0, + "content": "In this section, we examine GPT-4o's performance across five representative types of controllable conditions: canny, depth, sketch, pose, and mask. For each setting, we compare its outputs with those from Gemini 2.0 Flash [99] and a strong baseline method using ControlNet-based [121] diffusion backbones (FLUX.1-Dev [51], SDXL1.0 [82], SD3 Medium [27] or SD1.5 [90]). The results are illustrated in Figures 36, 37, 38, 39, 40." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.239, + 0.87, + 0.449 + ], + "angle": 0, + "content": "Overall, GPT-4o achieves performance that is on par with ControlNet-based methods in many cases, especially under common or moderately complex conditions. In particular, GPT-4o is capable of handling semantically rich or contextually complex prompts, where its strong foundation model understanding can help preserve both high-level semantics and visual plausibility. This is especially evident in tasks like pose-to-image or mask-to-image, where the structural signal may be sparse or ambiguous. However, GPT-4o's strong generative prior can sometimes lead to overly detailed or hallucinated elements, which compromises structural fidelity. For instance, in canny-to-image or depth-to-image tasks that require fine-grained geometric alignment, GPT-4o may deviate from the input layout more noticeably than traditional diffusion-based methods. In contrast, ControlNet exhibits more stable and accurate control in these low-level structure-guided scenarios, making it better suited for applications where spatial accuracy is critical. That said, ControlNet may struggle in more complex or open-ended cases, such as mask-to-image scenes involving multiple objects or interactions (e.g., aquariums with visitors and fish). In these scenarios, GPT-4o's strong cross-modal understanding partially compensates for its weaker control, offering plausible but not fully precise outputs. 
By comparison, Gemini 2.0 Flash lacks robust controllable generation capabilities across all evaluated control types. Its outputs often fail to match either the control condition or the textual prompt, reflecting limited capacity in multimodal alignment and structural grounding." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.453, + 0.871, + 0.511 + ], + "angle": 0, + "content": "In summary, GPT-4o demonstrates performance comparable to SOTA methods in most cases, excelling in tasks that require rich semantic understanding and contextual complexity while maintaining a balance between high-level semantics and visual plausibility. Although it may exhibit structural deviations in tasks requiring precise geometric alignment, its strong generative prior gives it an advantage in handling complex or open-ended scenarios." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "47" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.171, + 0.151, + 0.295, + 0.168 + ], + "angle": 0, + "content": "Canny-to-Image" + }, + { + "type": "title", + "bbox": [ + 0.302, + 0.188, + 0.711, + 0.206 + ], + "angle": 0, + "content": "Evaluation: Controllability and text consistency." + }, + { + "type": "image", + "bbox": [ + 0.279, + 0.184, + 0.299, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.215, + 0.318, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.215, + 0.493, + 0.344 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.216, + 0.661, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.217, + 0.838, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.349, + 0.798, + 0.381 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and canny condition below to generate a controllable image. The prompt is: a cigarette with purple tobacco.\"" + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.387, + 0.317, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.324, + 0.387, + 0.492, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.387, + 0.664, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.387, + 0.837, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.522, + 0.798, + 0.554 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and canny condition below to generate a controllable image. The prompt is: a traffic sign with red cross written on it.\"" + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.561, + 0.317, + 0.69 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.561, + 0.492, + 0.689 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.561, + 0.663, + 0.689 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.669, + 0.561, + 0.837, + 0.689 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.692, + 0.821, + 0.724 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and canny condition below to generate a controllable image. 
The prompt is: oil painting of geese flying in a v formation over a pond at sunset.\"" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.732, + 0.286, + 0.748 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.733, + 0.437, + 0.745 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.733, + 0.641, + 0.745 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.696, + 0.727, + 0.802, + 0.755 + ], + "angle": 0, + "content": "FLUX.1-Dev w. ControlNet" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.773, + 0.872, + 0.859 + ], + "angle": 0, + "content": "Figure 36: Task: Canny-to-Image generation. The goal is to generate prompt-aligned images guided by canny maps. Setup: Each row shows an input canny map and a text prompt, with outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Dev w. ControlNet [51]. Observations: GPT-4o performs worse than FLUX.1-Dev [51] in structural fidelity, often introducing additional visual details that deviate from the input edge map. However, it produces more semantically aligned and aesthetically pleasing results overall. Compared to Gemini 2.0 Flash, GPT-4o significantly outperforms in both structure preservation and prompt consistency." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "48" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.17, + 0.157, + 0.296, + 0.174 + ], + "angle": 0, + "content": "Depth-to-Image" + }, + { + "type": "title", + "bbox": [ + 0.302, + 0.189, + 0.711, + 0.207 + ], + "angle": 0, + "content": "Evaluation: Controllability and text consistency." + }, + { + "type": "image", + "bbox": [ + 0.15, + 0.216, + 0.317, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.217, + 0.491, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.496, + 0.217, + 0.664, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.217, + 0.838, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.157, + 0.349, + 0.8, + 0.381 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and depth condition below to generate a controllable image. The prompt is: a wooden bridge that has fallen down in the grass.\"" + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.385, + 0.317, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.385, + 0.492, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.385, + 0.664, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.385, + 0.839, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.156, + 0.519, + 0.8, + 0.551 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and depth condition below to generate a controllable image. 
The prompt is: a 3d image of a stone building with plants and rocks.\"" + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.554, + 0.317, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.554, + 0.491, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.496, + 0.554, + 0.664, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.554, + 0.838, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.157, + 0.644, + 0.8, + 0.676 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and depth condition below to generate a controllable image. The prompt is: a red pillow on a chair.\"" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.685, + 0.285, + 0.701 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.378, + 0.685, + 0.434, + 0.699 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.685, + 0.642, + 0.699 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.694, + 0.675, + 0.802, + 0.705 + ], + "angle": 0, + "content": "FLUX.1-Dev w. ControlNet" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.729, + 0.872, + 0.855 + ], + "angle": 0, + "content": "Figure 37: Task: Depth-to-image generation, aiming to synthesize controllable and visually coherent images based on a text prompt and a given depth map. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and FLUX.1-Dev w. ControlNet [51], focusing on controllability, text-prompt alignment, and the visual quality of generated scenes. Observations: GPT-4o generates visually appealing and stylistically consistent images that align reasonably with text and depth cues—such as the bridge scene and stone ruins with rich lighting and artistic tone. However, its controllability is weaker than FLUX.1-Dev w. ControlNet [51], which shows more precise depth alignment and object placement, as seen in the accurate layout of the bridge and red pillow. GPT-4o leans toward stylized coherence, while FLUX emphasizes photorealism with sharper spatial fidelity. Gemini 2.0 Flash lags behind both, often showing depth misalignment, shape distortion, and weaker semantic grounding." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "49" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.165, + 0.126, + 0.3, + 0.141 + ], + "angle": 0, + "content": "Sketch-to-Image" + }, + { + "type": "image", + "bbox": [ + 0.277, + 0.159, + 0.298, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.303, + 0.163, + 0.711, + 0.181 + ], + "angle": 0, + "content": "Evaluation: Controllability and text consistency." + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.19, + 0.317, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.322, + 0.19, + 0.492, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.496, + 0.19, + 0.665, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.19, + 0.838, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.324, + 0.807, + 0.356 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and sketch condition below to generate a controllable image. 
The prompt is: A small giraffe eating grass.\"" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.361, + 0.317, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.361, + 0.491, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.496, + 0.36, + 0.664, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.36, + 0.838, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.494, + 0.807, + 0.525 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and sketch condition below to generate a controllable image. The prompt is: A red metal electric fan.\"" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.528, + 0.317, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.529, + 0.491, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.496, + 0.529, + 0.664, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.529, + 0.838, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.66, + 0.807, + 0.692 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and sketch condition below to generate a controllable image. The prompt is: a man holding on to the strings of a flying parachute.\"" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.698, + 0.285, + 0.714 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.378, + 0.698, + 0.434, + 0.712 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.699, + 0.642, + 0.713 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.721, + 0.694, + 0.788, + 0.707 + ], + "angle": 0, + "content": "SDXL1.0" + }, + { + "type": "text", + "bbox": [ + 0.7, + 0.708, + 0.806, + 0.721 + ], + "angle": 0, + "content": "w. ControlNet" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.742, + 0.871, + 0.882 + ], + "angle": 0, + "content": "Figure 38: Task: Sketch-to-image generation, which requires translating rough line drawings into realistic and semantically accurate images guided by text prompts. Setup: We evaluate GPT-4o against Gemini 2.0 Flash [99] and SDXL1.0 w. ControlNet [82], focusing on how well each model respects the provided sketch while reflecting the described content. Observations: GPT-4o excels at generating lifelike scenes that match the prompt, often delivering visually pleasing and contextually grounded outputs—like the natural posture and setting of the giraffe or the dynamic movement in the parachute example. However, it tends to soften or reinterpret sketch lines, leading to slight mismatches in fine structure. In contrast, SDXL1.0 w. ControlNet [82] offers stronger adherence to the input sketch, capturing geometric details more accurately (e.g., fan blades and figure outlines), albeit with slightly more synthetic textures. Gemini 2.0 Flash shows limited understanding of both sketch and prompt, often producing less realistic or structurally off-target images." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "50" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.175, + 0.118, + 0.292, + 0.134 + ], + "angle": 0, + "content": "Pose-to-Image" + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.15, + 0.302, + 0.17 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.303, + 0.155, + 0.711, + 0.173 + ], + "angle": 0, + "content": "Evaluation: Controllability and text consistency." + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.178, + 0.316, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.178, + 0.491, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.178, + 0.666, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.178, + 0.838, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.156, + 0.281, + 0.821, + 0.327 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. The prompt is: Quarterback in a blue and white jersey with number 14, preparing to throw a football during a game.\"" + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.331, + 0.317, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.331, + 0.492, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.33, + 0.664, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.33, + 0.838, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.156, + 0.48, + 0.84, + 0.527 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. The prompt is: A young woman with long brown hair, wearing a blue strapless dress and a black necklace with a butterfly pendant, poses against a beige background.\"" + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.532, + 0.316, + 0.686 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.532, + 0.49, + 0.686 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.532, + 0.666, + 0.686 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.532, + 0.838, + 0.686 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.156, + 0.689, + 0.79, + 0.721 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. The prompt is: A woman is performing a pull-up exercise on a gym rack.\"" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.731, + 0.285, + 0.747 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.731, + 0.434, + 0.744 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.729, + 0.642, + 0.742 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.706, + 0.72, + 0.812, + 0.75 + ], + "angle": 0, + "content": "SD3 Medium w.
ControlNet" + }, + { + "type": "image_footnote", + "bbox": [ + 0.127, + 0.766, + 0.872, + 0.892 + ], + "angle": 0, + "content": "Figure 39: Task: Pose-to-image generation, aiming to synthesize realistic images that reflect both the human pose and descriptive prompt. Setup: We benchmark GPT-4o against Gemini 2.0 Flash [99] and SD3 Medium w. ControlNet [27], evaluating their ability to follow pose conditions while generating semantically accurate and coherent images. Observations: GPT-4o performs well in complex scenes—such as the football example—where it effectively integrates pose, clothing, and background with strong realism, contextual grounding, and pose accuracy. In simpler cases like the pull-up exercise, it shows occasional pose drift, especially in limbs. SD3 Medium w. ControlNet [27] offers better pose fidelity overall, though its visual quality can be inconsistent. Gemini 2.0 Flash underperforms in both structure and coherence, often generating anatomically incorrect or visually weak results. Overall, GPT-4o balances text understanding and generation quality, especially in detailed prompts." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "51" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.107, + 0.294, + 0.124 + ], + "angle": 0, + "content": "Mask-to-Image" + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.135, + 0.301, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.302, + 0.139, + 0.713, + 0.157 + ], + "angle": 0, + "content": "Evaluation: Controllability and text consistency." + }, + { + "type": "image", + "bbox": [ + 0.15, + 0.166, + 0.317, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.167, + 0.491, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.496, + 0.167, + 0.665, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.167, + 0.838, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.3, + 0.798, + 0.346 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and mask condition below to generate a controllable image. The prompt is: A peaceful indoor church scene with a plain wall, stained glass windows, a wooden podium, and a stone altar under soft sunlight.\"" + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.35, + 0.317, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.349, + 0.49, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.349, + 0.664, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.349, + 0.837, + 0.477 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.483, + 0.831, + 0.544 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and mask condition below to generate a controllable image. The prompt is: An indoor aquarium scene with a large fish tank full of colorful tropical fish swimming. The fish tank is surrounded by walls and has a visible floor at the bottom.
The environment is bright and underwater-themed.\"" + }, + { + "type": "image", + "bbox": [ + 0.15, + 0.546, + 0.317, + 0.675 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.547, + 0.49, + 0.674 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.547, + 0.664, + 0.674 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.547, + 0.837, + 0.675 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.677, + 0.834, + 0.722 + ], + "angle": 0, + "content": "Input Text: \"Follow the prompt and mask condition below to generate a controllable image. The prompt is: An indoor aquarium with a large fish tank and colorful tropical fish, with a few visitors in the scene.\"" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.728, + 0.286, + 0.745 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.378, + 0.728, + 0.436, + 0.743 + ], + "angle": 0, + "content": "GPT40" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.728, + 0.643, + 0.743 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.686, + 0.728, + 0.82, + 0.743 + ], + "angle": 0, + "content": "SD w. ControlNet" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.767, + 0.872, + 0.907 + ], + "angle": 0, + "content": "Figure 40: Task: Mask-to-image generation, which requires translating semantic segmentation maps and textual prompts into coherent and realistic images. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and SD1.5 w. ControlNet [90], focusing on their ability to combine spatial layout from the mask with deeper scene understanding from the prompt. Observations: Compared to previous control tasks, this setting demands more from the model in terms of semantic reasoning and compositional understanding. GPT-4o excels in this regard, producing visually consistent scenes that align with the prompt's intent—such as the serene church interior and the immersive aquarium setting with visitors. However, in fine-grained spatial control, especially with small or tightly shaped objects like tropical fish, SD1.5 w. ControlNet [90] performs better in preserving shape and positioning. Gemini 2.0 Flash continues to struggle in both fidelity and adherence to masks, often missing key scene elements or producing oversimplified outputs." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "52" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.296, + 0.105 + ], + "angle": 0, + "content": "2.2.7 Camera Control" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.115, + 0.871, + 0.213 + ], + "angle": 0, + "content": "Although recent visual generative models demonstrate remarkable capabilities in creating high-quality images, generating images with specific camera settings (e.g., bokeh blur parameters, focal length, shutter speed, color temperature) and making further adjustments remains a challenging task. We further explore GPT-4o's performance in camera control, evaluating its ability to generate images with desired photographic parameters in text instructions. This task is particularly significant as it bridges the gap between artistic creativity and technical precision, enabling users to simulate professional photography techniques and achieve greater control over the visual output. 
Such advancements have broad applications in fields like photography, cinematography, and visual design." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.219, + 0.869, + 0.318 + ], + "angle": 0, + "content": "Specifically, we collect text prompts from [118], and compare GPT-4o and Gemini 2.0 Flash [99] with Generative Photography (GP) [118]. The results are reported in Figures 41, 42. We can observe that GPT-4o achieves decent results in controlling bokeh blur parameters and color temperature, demonstrating its strong generalizability to various photographic settings. However, it still falls short in adjusting focal length and shutter speed, occasionally leading to inconsistent visual semantics or incorrect visual effects. By comparison, Gemini 2.0 Flash struggles significantly across all camera control scenarios, failing to produce coherent or accurate outputs that align with the specified photographic parameters, highlighting its limited capability in this domain." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.322, + 0.869, + 0.365 + ], + "angle": 0, + "content": "In this task, GPT-4o shows promising potential in camera control, outperforming Gemini 2.0 Flash and achieving competitive results in certain aspects. Nonetheless, there remains room for improvement in handling more complex adjustments, which could further enhance its applicability in professional photography and creative industries." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "53" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.175, + 0.149, + 0.296, + 0.163 + ], + "angle": 0, + "content": "Camera Control" + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.177, + 0.238, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.241, + 0.181, + 0.773, + 0.199 + ], + "angle": 0, + "content": "Evaluation: Camera setting adjustment, semantic consistency." 
+ }, + { + "type": "text", + "bbox": [ + 0.152, + 0.233, + 0.208, + 0.246 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.285, + 0.204, + 0.298 + ], + "angle": 0, + "content": "Gemini" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.303, + 0.211, + 0.316 + ], + "angle": 0, + "content": "2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.361, + 0.187, + 0.374 + ], + "angle": 0, + "content": "GP" + }, + { + "type": "image", + "bbox": [ + 0.215, + 0.204, + 0.334, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.215, + 0.268, + 0.334, + 0.331 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.215, + 0.332, + 0.334, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.4, + 0.288, + 0.411 + ], + "angle": 0, + "content": "28.0" + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.204, + 0.456, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.268, + 0.456, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.332, + 0.457, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.382, + 0.4, + 0.412, + 0.411 + ], + "angle": 0, + "content": "14.0" + }, + { + "type": "image", + "bbox": [ + 0.461, + 0.204, + 0.58, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.461, + 0.268, + 0.58, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.461, + 0.332, + 0.58, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.4, + 0.535, + 0.411 + ], + "angle": 0, + "content": "10.0" + }, + { + "type": "image", + "bbox": [ + 0.584, + 0.204, + 0.703, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.584, + 0.268, + 0.702, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.584, + 0.332, + 0.703, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.632, + 0.4, + 0.654, + 0.411 + ], + "angle": 0, + "content": "6.0" + }, + { + "type": "image", + "bbox": [ + 0.707, + 0.204, + 0.827, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.707, + 0.268, + 0.826, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.707, + 0.332, + 0.827, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.755, + 0.4, + 0.777, + 0.411 + ], + "angle": 0, + "content": "2.0" + }, + { + "type": "image_caption", + "bbox": [ + 0.153, + 0.416, + 0.808, + 0.463 + ], + "angle": 0, + "content": "Input Text: \"A horse with a white face stands in a grassy field, looking at the camera; with bokeh blur parameter *\" & \"Adjust the bokeh blur parameter to *\" (* indicates a specific value)." 
+ }, + { + "type": "text", + "bbox": [ + 0.154, + 0.498, + 0.21, + 0.512 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.554, + 0.204, + 0.567 + ], + "angle": 0, + "content": "Gemini" + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.572, + 0.212, + 0.585 + ], + "angle": 0, + "content": "2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.626, + 0.19, + 0.638 + ], + "angle": 0, + "content": "GP" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.47, + 0.336, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.535, + 0.335, + 0.597 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.598, + 0.336, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.261, + 0.665, + 0.292, + 0.676 + ], + "angle": 0, + "content": "24.9" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.471, + 0.457, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.535, + 0.457, + 0.597 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.598, + 0.459, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.384, + 0.665, + 0.414, + 0.676 + ], + "angle": 0, + "content": "36.9" + }, + { + "type": "image", + "bbox": [ + 0.462, + 0.471, + 0.58, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.462, + 0.535, + 0.58, + 0.597 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.462, + 0.598, + 0.581, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.665, + 0.536, + 0.676 + ], + "angle": 0, + "content": "48.9" + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.471, + 0.703, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.535, + 0.703, + 0.597 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.598, + 0.703, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.629, + 0.665, + 0.659, + 0.676 + ], + "angle": 0, + "content": "60.9" + }, + { + "type": "image", + "bbox": [ + 0.707, + 0.471, + 0.827, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.707, + 0.535, + 0.827, + 0.597 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.707, + 0.598, + 0.827, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.751, + 0.665, + 0.781, + 0.676 + ], + "angle": 0, + "content": "69.9" + }, + { + "type": "image_caption", + "bbox": [ + 0.16, + 0.681, + 0.836, + 0.713 + ], + "angle": 0, + "content": "Input Text: \"A beautiful garden filled with red roses and green leaves; with * mm lens\" & \"Adjust the lens to * mm\"." + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.734, + 0.871, + 0.86 + ], + "angle": 0, + "content": "Figure 41: Task: Camera control. The goal is to generate images aligned with specific photographic parameters, such as bokeh blur, focal length, shutter speed, and color temperature. Setup: Results are based on text prompts collected from [118], comparing outputs from GPT-4o, Gemini 2.0 Flash [99], and Generative Photography (GP) [118]. Each row includes the input text instructions and corresponding outputs. 
Observations: GPT-4o demonstrates strong performance in controlling bokeh blur, producing visually appealing and parameter-aligned results. However, it shows limitations in handling focal length, occasionally generating inconsistent or less accurate outputs. By contrast, Gemini 2.0 Flash struggles significantly in both aspects, often failing to produce coherent results. Overall, GPT-4o achieves better performance in this task but still requires further refinement to enhance focal length control." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "54" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.175, + 0.153, + 0.296, + 0.167 + ], + "angle": 0, + "content": "Camera Control" + }, + { + "type": "title", + "bbox": [ + 0.242, + 0.183, + 0.773, + 0.202 + ], + "angle": 0, + "content": "Evaluation: Camera setting adjustment, semantic consistency." + }, + { + "type": "text", + "bbox": [ + 0.155, + 0.232, + 0.21, + 0.244 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.208, + 0.335, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.208, + 0.458, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.463, + 0.208, + 0.58, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.584, + 0.208, + 0.703, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.707, + 0.208, + 0.826, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.289, + 0.211, + 0.321 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.27, + 0.336, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.27, + 0.459, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.464, + 0.27, + 0.581, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.27, + 0.703, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.27, + 0.826, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.361, + 0.194, + 0.374 + ], + "angle": 0, + "content": "GP" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.334, + 0.336, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.26, + 0.402, + 0.292, + 0.413 + ], + "angle": 0, + "content": "0.88" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.334, + 0.459, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.383, + 0.402, + 0.415, + 0.413 + ], + "angle": 0, + "content": "0.68" + }, + { + "type": "image", + "bbox": [ + 0.463, + 0.334, + 0.581, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.402, + 0.537, + 0.413 + ], + "angle": 0, + "content": "0.48" + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.334, + 0.703, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.628, + 0.402, + 0.659, + 0.413 + ], + "angle": 0, + "content": "0.38" + }, + { + "type": "image", + "bbox": [ + 0.707, + 0.334, + 0.826, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.75, + 0.402, + 0.781, + 0.413 + ], + "angle": 0, + "content": "0.28" + 
}, + { + "type": "image_caption", + "bbox": [ + 0.153, + 0.416, + 0.817, + 0.448 + ], + "angle": 0, + "content": "Input Text: \"A blue pot with a plant in it is placed on a window sill, surrounded by other potted plants; with shutter speed * second\" & \"Adjust the shutter speed to * second\"." + }, + { + "type": "text", + "bbox": [ + 0.155, + 0.484, + 0.21, + 0.497 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.459, + 0.336, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.459, + 0.459, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.463, + 0.459, + 0.581, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.459, + 0.704, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.459, + 0.827, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.54, + 0.215, + 0.572 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.523, + 0.336, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.524, + 0.459, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.463, + 0.523, + 0.581, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.524, + 0.704, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.524, + 0.827, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.612, + 0.194, + 0.624 + ], + "angle": 0, + "content": "GP" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.584, + 0.336, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.252, + 0.651, + 0.3, + 0.663 + ], + "angle": 0, + "content": "3100.0" + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.584, + 0.459, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.651, + 0.423, + 0.663 + ], + "angle": 0, + "content": "4000.0" + }, + { + "type": "image", + "bbox": [ + 0.463, + 0.584, + 0.581, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.651, + 0.545, + 0.663 + ], + "angle": 0, + "content": "8000.0" + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.584, + 0.704, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.621, + 0.651, + 0.668, + 0.663 + ], + "angle": 0, + "content": "7000.0" + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.584, + 0.827, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.743, + 0.651, + 0.79, + 0.663 + ], + "angle": 0, + "content": "3000.0" + }, + { + "type": "image_caption", + "bbox": [ + 0.16, + 0.668, + 0.83, + 0.714 + ], + "angle": 0, + "content": "Input Text: \"A collection of trash cans and a potted plant are seen in the image. The trash cans are individually in blue, black and yellow; with temperature * kelvin\" & \"Adjust the temperature to * kelvin\"." + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.731, + 0.872, + 0.857 + ], + "angle": 0, + "content": "Figure 42: Task: Camera control. 
The goal is to generate images aligned with specific photographic parameters, such as bokeh blur, focal length, shutter speed, and color temperature. Setup: Results are based on text prompts collected from [118], comparing outputs from GPT-4o, Gemini 2.0 Flash [99], and Generative Photography (GP) [118]. Each row includes the input text instructions and corresponding outputs. Observations: GPT-4o demonstrates strong performance in controlling color temperature, producing coherent and visually accurate results. However, it struggles with shutter speed, occasionally resulting in inconsistent or unrealistic motion effects. In contrast, Gemini 2.0 Flash fails to consistently handle either parameter, often producing outputs that lack alignment with the desired settings. Overall, GPT-4o outperforms Gemini 2.0 Flash in this task, but further improvements are needed for precise shutter speed control." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "55" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.382, + 0.108 + ], + "angle": 0, + "content": "2.2.8 In-context Visual Prompting" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.115, + 0.871, + 0.214 + ], + "angle": 0, + "content": "The in-context visual prompting tasks aim at understanding and executing specific tasks on new query images by leveraging a pair of task-specific example images and accompanying text instructions. Previous works [105, 18, 52] have explored this capability in the context of diffusion and autoregressive models, demonstrating its potential in enhancing model adaptability. The significance of in-context visual prompting lies in its ability to enable models to generalize to novel tasks. This approach mirrors human-like learning, where new tasks can be understood and performed by observing relevant examples. This capability has broad implications across various domains, and paves the way for more flexible and efficient paradigms capable of adapting to a wide range of specific tasks." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.219, + 0.871, + 0.262 + ], + "angle": 0, + "content": "We curate four representative tasks to evaluate the performance of GPT-4o in in-context visual prompting. These tasks are designed to assess the model's ability to understand and adapt to specific visual tasks based on provided examples and guidance, including:" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.272, + 0.867, + 0.3 + ], + "angle": 0, + "content": "- Movie-Shot Generation: A three-shot image collected from [42] is provided as an example, and the model is instructed to follow this format to generate similar movie shots for the query image." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.305, + 0.867, + 0.333 + ], + "angle": 0, + "content": "- Ray-Tracing Rendering: An example gaming scene is provided with and without ray tracing, and the model is expected to render a ray-traced version of the query image." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.337, + 0.867, + 0.379 + ], + "angle": 0, + "content": "- Overlaid Mask Visualization: The model receives an original image accompanied by its corresponding segmented results from [49] and is tasked with outputting the segmented results in the same format for the query image." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.384, + 0.867, + 0.412 + ], + "angle": 0, + "content": "- Maze Solving: A maze and its corresponding solution path are provided as examples, and the model is required to draw the solution path for a new maze presented in the query image." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.272, + 0.867, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.423, + 0.872, + 0.548 + ], + "angle": 0, + "content": "All the results are illustrated in Figure 43. Compared with Gemini 2.0 Flash [99], GPT-4o demonstrates promising performance in movie-shot generation and ray-tracing rendering tasks, showcasing its ability to follow example formats and generate visually coherent outputs. However, it still struggles with maintaining consistent visual semantics across the generated outputs. For the overlaid mask visualization task, GPT-4o falls short in effectively executing the instructions. The result fails to adhere to the required format, indicating that the model's ability to process and generate complex outputs remains limited. For maze solving, a task that demands advanced visual reasoning and logical inference, GPT-4o struggles significantly. This highlights the challenges in combining higher-level reasoning with visual generation capabilities, suggesting that more sophisticated reasoning mechanisms are needed for tasks of this nature." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.554, + 0.871, + 0.597 + ], + "angle": 0, + "content": "In summary, GPT-4o shows considerable potential in in-context visual prompting, while it still underperforms in certain difficult tasks. These observations suggest that further advancements are necessary to enhance its generation and reasoning capabilities for more complex and diverse visual tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "56" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.202, + 0.113, + 0.398, + 0.128 + ], + "angle": 0, + "content": "In-Context Visual Prompting" + }, + { + "type": "image", + "bbox": [ + 0.193, + 0.138, + 0.214, + 0.155 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.142, + 0.814, + 0.158 + ], + "angle": 0, + "content": "Evaluation: Understanding and executing specific tasks with example images." + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.164, + 0.277, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.164, + 0.44, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.441, + 0.165, + 0.512, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.165, + 0.81, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.249, + 0.786, + 0.277 + ], + "angle": 0, + "content": "Input Text: \"The first image contains three movie shots. 
Please imitate this image and create the subsequent movie shots for the second image.\"" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.282, + 0.275, + 0.295 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.282, + 0.396, + 0.295 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.455, + 0.282, + 0.499, + 0.293 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.613, + 0.284, + 0.711, + 0.295 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image", + "bbox": [ + 0.258, + 0.303, + 0.706, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.377, + 0.452, + 0.389 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.391, + 0.655, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.369, + 0.465, + 0.444, + 0.477 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "image_caption", + "bbox": [ + 0.524, + 0.467, + 0.621, + 0.478 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.479, + 0.816, + 0.521 + ], + "angle": 0, + "content": "Input Text: \"The first image includes an original gaming scene, and the scene enhanced with ray tracing. Please imitate this image and create the scene enhanced with ray tracing for the second image.\"" + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.526, + 0.433, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.526, + 0.553, + 0.592 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.558, + 0.526, + 0.68, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.526, + 0.806, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.595, + 0.825, + 0.624 + ], + "angle": 0, + "content": "Input Text: \"The first image shows an original image and its segmented results. 
Please imitate this image and output the segmented results in the same format for the second image.\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.274, + 0.629, + 0.349, + 0.641 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "image_caption", + "bbox": [ + 0.458, + 0.629, + 0.532, + 0.641 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "image_caption", + "bbox": [ + 0.597, + 0.629, + 0.64, + 0.639 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "image_caption", + "bbox": [ + 0.694, + 0.629, + 0.791, + 0.64 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "image", + "bbox": [ + 0.205, + 0.661, + 0.277, + 0.716 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.293, + 0.661, + 0.362, + 0.718 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.391, + 0.656, + 0.476, + 0.722 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.492, + 0.656, + 0.575, + 0.72 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.6, + 0.666, + 0.674, + 0.718 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.691, + 0.666, + 0.762, + 0.718 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.73, + 0.813, + 0.759 + ], + "angle": 0, + "content": "Input Text: \"The first image displays an unsolved maze and the maze with a solution path in red. Please imitate this image and identify the solution path for the second image.\"" + }, + { + "type": "text", + "bbox": [ + 0.243, + 0.766, + 0.319, + 0.779 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.39, + 0.766, + 0.465, + 0.779 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.766, + 0.553, + 0.777 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.631, + 0.766, + 0.728, + 0.777 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.796, + 0.871, + 0.894 + ], + "angle": 0, + "content": "Figure 43: Task: In-context visual prompting. The goal is to perform specific visual tasks on new query images based on task-specific example images and text instructions. Setup: Four representative tasks are evaluated: movie-shot generation, ray-tracing rendering, overlaid mask visualization, and maze solving. Each row includes example images, query images, and the corresponding outputs. Observations: GPT-4o excels in movie-shot generation and ray-tracing, producing coherent outputs but lacking consistency in visual semantics. It fails with overlaid mask visualization and maze solving, showing limits in complex task integration. While promising for in-context visual prompting, it needs refinement for more complex and reasoning-intensive tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "57" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.303, + 0.106 + ], + "angle": 0, + "content": "2.3 Image-to-3D Tasks" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.117, + 0.871, + 0.147 + ], + "angle": 0, + "content": "We evaluate GPT-4o's ability to infer 3D structure from 2D images across three tasks: 2D image-to-3D modeling, 2D UV map-to-3D rendering, and novel view synthesis."
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.16, + 0.338, + 0.175 + ], + "angle": 0, + "content": "2.3.1 Image to 3D modeling" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.184, + 0.872, + 0.269 + ], + "angle": 0, + "content": "Generating 3D models from monocular images boosts a wide range of applications, including augmented reality, virtual reality, and the gaming industry. This capability not only facilitates the content creation process but also mitigates the reliance on specialized 3D artists for creating 3D assets, which is more time- and cost-effective. Therefore, there is a growing research interest in generating 3D models from 2D images. Early methods on image-to-3D employ the learning-based approaches for single-view reconstruction [74, 77, 102, 79]. Recent works leverage the diffusion model prior to perform image-conditioned 3D generative modeling [69, 68, 83, 113]." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.273, + 0.872, + 0.427 + ], + "angle": 0, + "content": "In this section, we investigate the potential of GPT-4o for 3D modeling from 2D images. We begin by prompting GPT-4o to generate a Cinema 4D modeling interface to test its ability to produce coherent representations of structure, material, and wireframe based on the input image. As shown in Figure 44, GPT-4o can generate high-quality 3D model renderings within the application interface. Notably, the generated models exhibit clear wireframes and textures consistent with the input images. In contrast, Gemini 2.0 Flash and Midjourney v6.1 fail to achieve comparable results under the same conditions, which produce inconsistent renderings. We then prompt the GPT-4o to generate corresponding 3D object and material files in .obj and .mtl formats to further evaluate its understanding of the underlying structure in the rendered images. However, the output 3D models are coarse and inconsistent with input images, indicating that although GPT-4o can produce visually coherent 3D renderings, its capability to transform these into accurate and usable 3D object files remains limited. Additionally, Gemini 2.0 Flash and Midjourney v6.1 do not support exporting 3D models." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.44, + 0.357, + 0.456 + ], + "angle": 0, + "content": "2.3.2 UV Map to 3D rendering" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.463, + 0.87, + 0.547 + ], + "angle": 0, + "content": "UV maps are 2D images that store texture information for 3D models. In 3D modeling, geometric data is represented in 3D space, while texture data is defined in a 2D texture space. UV mapping is the process of projecting a 2D UV map onto a 3D model, accurately aligning texture with geometry. The UV mapping process can evaluate models' capability for 3D perception and spatial understanding. Moreover, this task has broad applications in design, helping to reduce the burden on designers to create product renderings from 2D maps manually and provide useful references." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.553, + 0.87, + 0.624 + ], + "angle": 0, + "content": "As shown in Figure 45, GPT-4o exhibits a superior ability to generate consistent 3D renderings from 2D maps compared to Gemini 2.0 Flash and Midjourney v6.1. However, some outputs remain unsatisfactory, displaying inconsistencies in patterns and structure (see row 3 in Figure 45). Gemini 2.0 Flash struggles to correctly wrap the 3D model, though it maintains pattern consistency. 
Midjourney v6.1 tends to introduce additional, imagined features, which reduce controllability in this task." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.637, + 0.331, + 0.652 + ], + "angle": 0, + "content": "2.3.3 Novel View Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.661, + 0.872, + 0.759 + ], + "angle": 0, + "content": "From a single monocular view, humans can imagine an object's 3D shape and appearance, since they have accumulated rich prior knowledge of different objects throughout their daily lives. This ability to infer novel views of objects is essential for a wide range of tasks, from object manipulation to artistic creation such as painting. Early works achieve image-to-3D reconstruction using category-specific priors or large-scale pre-training [45, 80, 87, 32, 131]. Recent studies have shown that large diffusion models contain rich 3D prior information about the visual world, enabling them to perform novel view synthesis [69, 68, 83, 70]. These novel views can then be used for zero-shot 3D reconstruction using different 3D representations such as NeRF [76], mesh, or SDF." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.765, + 0.872, + 0.85 + ], + "angle": 0, + "content": "In this section, we evaluate GPT-4o's ability to perform novel view synthesis on objects with artistic styles and asymmetric geometry. As shown in Figure 46, for artistically styled objects, GPT-4o and Gemini 2.0 Flash largely preserve structural consistency with the input image, although they may change some elements or fine details. For the asymmetric object, GPT-4o can preserve the object scale and size better than Gemini 2.0 Flash. However, Midjourney v6.1 fails to generate consistent novel views, instead producing visually appealing images that do not align with the given task prompt." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "58" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.165, + 0.171, + 0.319, + 0.188 + ], + "angle": 0, + "content": "Image to 3D Model" + }, + { + "type": "title", + "bbox": [ + 0.254, + 0.203, + 0.768, + 0.221 + ], + "angle": 0, + "content": "Evaluation: Shape/texture consistency, wireframe plausibility."
+ }, + { + "type": "image", + "bbox": [ + 0.164, + 0.227, + 0.323, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.334, + 0.227, + 0.493, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.227, + 0.665, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.675, + 0.227, + 0.834, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.349, + 0.323, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.349, + 0.493, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.349, + 0.664, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.349, + 0.834, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.471, + 0.323, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.471, + 0.493, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.471, + 0.664, + 0.592 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.471, + 0.836, + 0.592 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.598, + 0.325, + 0.72 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.598, + 0.495, + 0.72 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.598, + 0.666, + 0.72 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.598, + 0.834, + 0.719 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.731, + 0.855, + 0.746 + ], + "angle": 0, + "content": "Input Text: \"Generate a pre-render view of a C4D model, including the UI, wireframe and material.\"" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.753, + 0.282, + 0.768 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.385, + 0.752, + 0.437, + 0.765 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.525, + 0.752, + 0.642, + 0.765 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.698, + 0.751, + 0.811, + 0.766 + ], + "angle": 0, + "content": "Midjourney v6.1" + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.784, + 0.871, + 0.842 + ], + "angle": 0, + "content": "Figure 44: Task: Image-to-3D model rendering. Evaluate the 3D modeling ability given a 2D image. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better 3D model rendering with consistent shape, texture, and plausible wireframe than Gemini 2.0 Flash and Midjourney v6.1." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "59" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.162, + 0.16, + 0.377, + 0.178 + ], + "angle": 0, + "content": "2D UV map to 3D rendering" + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.194, + 0.355, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.356, + 0.198, + 0.69, + 0.215 + ], + "angle": 0, + "content": "Evaluation: Structure/pattern consistency." 
+ }, + { + "type": "image", + "bbox": [ + 0.149, + 0.22, + 0.353, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.361, + 0.22, + 0.522, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.528, + 0.22, + 0.684, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.22, + 0.848, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.344, + 0.351, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.344, + 0.522, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.528, + 0.344, + 0.686, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.346, + 0.846, + 0.468 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.468, + 0.353, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.469, + 0.522, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.528, + 0.47, + 0.685, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.47, + 0.848, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.605, + 0.355, + 0.724 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.604, + 0.522, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.528, + 0.604, + 0.686, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.691, + 0.604, + 0.848, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.151, + 0.731, + 0.825, + 0.761 + ], + "angle": 0, + "content": "Input Text: \"Assemble this packaging cutout into a complete product and output a 3D rendered image.\"" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.764, + 0.29, + 0.779 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.395, + 0.763, + 0.449, + 0.776 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.538, + 0.763, + 0.656, + 0.777 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.704, + 0.763, + 0.818, + 0.779 + ], + "angle": 0, + "content": "Midjourney v6.1" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.796, + 0.872, + 0.853 + ], + "angle": 0, + "content": "Figure 45: Task: 2D UV map to 3D rendering. Evaluate the 3D perception and spatial understanding ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better 3D renderings based on 2D maps than Gemini 2.0 Flash and Midjourney v6.1. However, structure and pattern inconsistencies still exist among these three models." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "60" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.186, + 0.194, + 0.356, + 0.211 + ], + "angle": 0, + "content": "Novel View Synthesis" + }, + { + "type": "title", + "bbox": [ + 0.397, + 0.226, + 0.612, + 0.246 + ], + "angle": 0, + "content": "Evaluation: Consistency." 
+ }, + { + "type": "image", + "bbox": [ + 0.167, + 0.25, + 0.255, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.263, + 0.251, + 0.457, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.46, + 0.252, + 0.644, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.65, + 0.251, + 0.848, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.168, + 0.369, + 0.255, + 0.479 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.37, + 0.456, + 0.479 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.459, + 0.37, + 0.644, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.651, + 0.37, + 0.846, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.168, + 0.483, + 0.255, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.483, + 0.457, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.459, + 0.484, + 0.644, + 0.585 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.65, + 0.484, + 0.846, + 0.586 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.168, + 0.589, + 0.255, + 0.689 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.588, + 0.457, + 0.689 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.459, + 0.587, + 0.644, + 0.691 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.65, + 0.589, + 0.846, + 0.69 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.31, + 0.698, + 0.675, + 0.714 + ], + "angle": 0, + "content": "Input Text: \"Generate three views of this picture.\"" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.719, + 0.268, + 0.735 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.351, + 0.721, + 0.404, + 0.734 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.72, + 0.62, + 0.734 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.685, + 0.719, + 0.798, + 0.735 + ], + "angle": 0, + "content": "Midjourney v6.1" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.761, + 0.87, + 0.818 + ], + "angle": 0, + "content": "Figure 46: Task: Novel view synthesis. Evaluate the 3D perception and spatial understanding ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better style and structure-consistent novel views for both artistic painting and asymmetric objects." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "61" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.293, + 0.106 + ], + "angle": 0, + "content": "2.4 Image-to-X Tasks" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.117, + 0.871, + 0.16 + ], + "angle": 0, + "content": "In this section, we further evaluate both GPT-4o and Gemini 2.0 Flash for several dense image understanding tasks, including segmentation-related tasks, depth estimation, normal estimation, matting, salient object detection, edge detection, layout detection, text detection, and object tracking." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.174, + 0.325, + 0.188 + ], + "angle": 0, + "content": "2.4.1 Image Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.197, + 0.872, + 0.295 + ], + "angle": 0, + "content": "Image segmentation tasks group pixels of the given image or video into semantic regions. It is a fundamental problem in computer vision and involves numerous real-world applications, such as robotics, automated surveillance, and image/video editing. With the development of recent deep learning methods, this domain has achieved rapid progress. Early works mainly adopt CNN-based methods with large kernels or respective fields. Recently, transformer-based methods have also worked well and surpassed previous CNN-based methods on various benchmarks. In particular, we test three segmentation tasks, including referring segmentation, semantic segmentation, and panoptic segmentation." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.301, + 0.87, + 0.455 + ], + "angle": 0, + "content": "Referring Segmentation. This task outputs the corresponding mask according to the input texts, and the goal is to test the pixel-level grounding ability of the model. In Figure 47, we compare GPT-4o, Gemini 2.0 Flash and recent state-of-the-art method, Sa2VA [117] (8B model \\(\\dagger\\)). We show five open-world test cases. For the first two cases, GPT-4o shows the coarse localization ability on the background region. For example, it can mark the grass region despite the unfavorable boundaries. However, compared to the SOTA method, Sa2VA, GPT-4o mistakenly merges both large regions. In the third row, both GPT-4o and Gemini 2.0 Flash cannot perform grounding with complex text inputs. In the fourth row, all models perform badly. GPT-4o generates an unseen chair in the images while Gemini 2.0 Flash performs image editing functions by replacing the smallest chair with a normal chair. Sa2VA also segments the wrong object (the nearest chair). In the last example, GPT-4o also cannot segment smaller objects (\"bag\"). For all examples, both GPT-4o and Gemini 2.0 Flash modify the image contents. These examples indicate that GPT-4o has weak pixel grounding ability." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.459, + 0.872, + 0.584 + ], + "angle": 0, + "content": "Semantic Segmentation. Semantic segmentation assigns each pixel a semantic label, which is one basic vision task. In Figure 48, we show several test cases on the semantic segmentation task. In particular, we adopt Deeplab-V3+ [14] (ResNet101 as backbone, trained on Pascal-Context) as one expert model for reference. Surprisingly, the mask quality of GPT-4o is good on four examples, even comparable with an expert model, Deeplab-V3+. During the testing, we find the texts may be randomly appended to the masks. This is why the first row differs from the remaining examples. 
For the second and third examples, GPT-4o misaligns the text and mask regions. Compared to Gemini 2.0 Flash, GPT-4o has a much stronger ability in semantic segmentation, particularly for mask shape. However, there is still substantial room for improvement on this task, including a unified semantic segmentation format, enhanced text and mask alignment, and more correct mask labels." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.59, + 0.872, + 0.743 + ], + "angle": 0, + "content": "Panoptic Segmentation. This task assigns the foreground region a semantic label and assigns one mask label and one instance ID to each instance, which is a unified task format of semantic segmentation and instance segmentation. In Figure 49, we compare the panoptic segmentation ability of GPT-4o, Gemini 2.0 Flash, and one expert model, K-Net [123] (trained on the COCO panoptic segmentation dataset, with ResNet50 as backbone). Overall, the mask shapes of GPT-4o are good. The model can understand the panoptic segmentation task, while Gemini 2.0 Flash cannot do this task in the first and third cases. However, the spatial locations have been changed for all cases. The generated masks follow a part-whole format and are even finer-grained than those of K-Net. For example, in the first example, the person's jersey number (17) and the people's hair are also marked. Meanwhile, we find a similar issue: some examples include text labels while others do not, even though they use the same text prompt. In addition, GPT-4o can distinguish different instances with different colors, although most of the instance masks are not accurate (see the last example)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.152, + 0.897, + 0.428, + 0.912 + ], + "angle": 0, + "content": "\\(\\dagger\\) https://huggingface.co/ByteDance/Sa2VA-8B" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "62" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.214, + 0.102, + 0.304, + 0.117 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.131, + 0.786, + 0.148 + ], + "angle": 0, + "content": "Evaluation: Referring Expression Segmentation, Grounding and Grouping."
+ }, + { + "type": "image", + "bbox": [ + 0.182, + 0.152, + 0.327, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.344, + 0.152, + 0.49, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.152, + 0.653, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.673, + 0.152, + 0.817, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.268, + 0.803, + 0.282 + ], + "angle": 0, + "content": "Input Text: \"Please segment the grass in the image and directly generate the output image.\"" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.286, + 0.329, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.286, + 0.491, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.286, + 0.657, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.673, + 0.287, + 0.818, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.404, + 0.793, + 0.418 + ], + "angle": 0, + "content": "Input Text: \"Please segment the sand in the image and directly generate the output image.\"" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.424, + 0.328, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.424, + 0.491, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.424, + 0.657, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.673, + 0.424, + 0.817, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.54, + 0.823, + 0.567 + ], + "angle": 0, + "content": "Input Text: \"Please segment the table beside the black sofa in the image and directly generate the output image.\"" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.571, + 0.328, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.571, + 0.486, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.571, + 0.655, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.673, + 0.571, + 0.817, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.687, + 0.778, + 0.701 + ], + "angle": 0, + "content": "Input Text: \"Please segment the smallest chair and directly generate the output image.\"" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.706, + 0.328, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.706, + 0.486, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.706, + 0.657, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.673, + 0.706, + 0.818, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.82, + 0.793, + 0.834 + ], + "angle": 0, + "content": "Input Text: \"Please segment the bag in the image and directly generate the output image.\"" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.843, + 0.296, + 0.856 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.394, + 0.843, + 0.444, + 0.855 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.843, + 0.638, 
+ 0.855 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.722, + 0.843, + 0.77, + 0.855 + ], + "angle": 0, + "content": "Sa2VA" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.877, + 0.871, + 0.92 + ], + "angle": 0, + "content": "Figure 47: Task: Image to X: Referring expression segmentation. Evaluate the grounding and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Sa2VA [117]. Observation: These examples indicate that current GPT-4o has weak pixel-level grounding ability." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "63" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.204, + 0.149, + 0.3, + 0.166 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "image", + "bbox": [ + 0.257, + 0.177, + 0.279, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.281, + 0.182, + 0.74, + 0.198 + ], + "angle": 0, + "content": "Evaluation: Semantic Segmentation, Shape and Grouping." + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.2, + 0.314, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.199, + 0.49, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.199, + 0.655, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.199, + 0.83, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.32, + 0.717, + 0.336 + ], + "angle": 0, + "content": "Input Text: \"Please generate the semantic segmentation result of the image.\"" + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.34, + 0.315, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.34, + 0.49, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.34, + 0.657, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.34, + 0.83, + 0.456 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.461, + 0.713, + 0.478 + ], + "angle": 0, + "content": "Input Text: \"Please generate the semantic segmentation result of the image.\"" + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.485, + 0.314, + 0.601 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.486, + 0.49, + 0.601 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.486, + 0.657, + 0.601 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.486, + 0.83, + 0.601 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.605, + 0.713, + 0.621 + ], + "angle": 0, + "content": "Input Text: \"Please generate the semantic segmentation result of the image.\"" + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.629, + 0.314, + 0.744 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.629, + 0.49, + 0.744 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.629, + 0.657, + 0.744 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.629, + 0.83, + 0.744 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.746, + 0.721, + 0.762 + ], + 
"angle": 0, + "content": "Input Text: \"Please generate the semantic segmentation result of the image.\"" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.766, + 0.278, + 0.782 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.379, + 0.766, + 0.432, + 0.779 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.766, + 0.636, + 0.779 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.704, + 0.766, + 0.794, + 0.781 + ], + "angle": 0, + "content": "Deeplab-V3+" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.806, + 0.872, + 0.863 + ], + "angle": 0, + "content": "Figure 48: Task: Image to X: Semantic segmentation. Evaluate the shape and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Deeplab-V3+ [14]. Observation: Compared with Gemin-2.0, the mask quality of GPT-4o is good. However, there are still huge gaps in the standard semantic segmentation format." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "64" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.1, + 0.304, + 0.116 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "image", + "bbox": [ + 0.272, + 0.126, + 0.291, + 0.144 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.293, + 0.13, + 0.726, + 0.146 + ], + "angle": 0, + "content": "Evaluation: Panoptic Segmentation, Grouping and Shape." + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.152, + 0.327, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.344, + 0.151, + 0.491, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.152, + 0.655, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.672, + 0.152, + 0.818, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.268, + 0.698, + 0.282 + ], + "angle": 0, + "content": "Input Text: \"Please generate the panoptic segmentation result of the image.\"" + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.286, + 0.328, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.286, + 0.491, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.286, + 0.655, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.672, + 0.286, + 0.818, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.402, + 0.698, + 0.417 + ], + "angle": 0, + "content": "Input Text: \"Please generate the panoptic segmentation result of the image.\"" + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.424, + 0.328, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.424, + 0.49, + 0.536 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.424, + 0.657, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.672, + 0.424, + 0.814, + 0.536 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.539, + 0.698, + 0.554 + ], + "angle": 0, + "content": "Input Text: \"Please generate the panoptic segmentation result of the image.\"" + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.56, + 0.328, + 
0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.56, + 0.49, + 0.675 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.561, + 0.656, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.672, + 0.562, + 0.814, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.677, + 0.706, + 0.692 + ], + "angle": 0, + "content": "Input Text: \"Please generate the panoptic segmentation result of the image.\"" + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.697, + 0.328, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.696, + 0.49, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.697, + 0.656, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.672, + 0.697, + 0.814, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.81, + 0.706, + 0.825 + ], + "angle": 0, + "content": "Input Text: \"Please generate the panoptic segmentation result of the image.\"" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.833, + 0.297, + 0.847 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.394, + 0.833, + 0.444, + 0.845 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.833, + 0.638, + 0.845 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.725, + 0.833, + 0.768, + 0.845 + ], + "angle": 0, + "content": "K-Net" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.87, + 0.872, + 0.926 + ], + "angle": 0, + "content": "Figure 49: Task: Image to X: Panoptic segmentation. Evaluate the shape and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and K-Net [123]. Observation: GPT-4o can understand the panoptic segmentation task, while Gemini 2.0 Flash cannot do this task in the first and third cases." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "65" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.289, + 0.106 + ], + "angle": 0, + "content": "2.4.2 Edge Detection" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.115, + 0.871, + 0.173 + ], + "angle": 0, + "content": "Edge Detection. As a classic vision task, edge detection aims to identify the boundaries or edges of objects within an image. These edges represent the locations with significant changes in image intensity, color, or other visual features. Common edge detection operators include the Sobel, Prewitt, and Canny operators. Recent works adopt deep learning-based approaches." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.177, + 0.871, + 0.248 + ], + "angle": 0, + "content": "In Figure 50, we compare this ability with a recent SOTA deep learning-based approach, EDMB [56]. Across the four examples, we find that both GPT-4o and Gemini 2.0 Flash can detect object edges for both foreground and background objects. In addition, GPT-4o even reproduces fine details well. We find two critical issues: 1) the spatial localization of GPT-4o's outputs is changed, as observed in the segmentation tasks; 2) the image content is also changed. For example, in the first example, a road is generated that does not exist in the input image."
+ }, + { + "type": "text", + "bbox": [ + 0.127, + 0.253, + 0.871, + 0.31 + ], + "angle": 0, + "content": "Image Matting. Image matting is a technique in image processing that aims to separate a foreground object from its background and obtain a detailed alpha matte, which indicates the transparency or opacity of each pixel in the foreground. It goes beyond simple segmentation by providing more precise information about the boundaries and fine details of the object, especially for complex objects like hair or smoke." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.315, + 0.871, + 0.399 + ], + "angle": 0, + "content": "In Figure 51, we show three testing examples, with one expert model, Matting Anything [53]. Compared with Gemini, GPT-4o can handle the simple cases, as shown in the third row. Thus, it can understand the task goal. For example, it can even keep the fine-grained details of a horse hair. However, considering the strict requirements of image matting (fine-grained and aligned details), the overall quality is bad. Compared with Matting Anything, both GPT-4o and Gemini work poorly. We find nearly the same issues: 1) Wrong spatial localization, 2) Changed contents." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "66" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.197, + 0.223, + 0.294, + 0.24 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "title", + "bbox": [ + 0.336, + 0.253, + 0.686, + 0.271 + ], + "angle": 0, + "content": "Evaluation: Edge Detection, Shape Analysis." + }, + { + "type": "image", + "bbox": [ + 0.146, + 0.277, + 0.328, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.328, + 0.278, + 0.481, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.481, + 0.278, + 0.664, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.277, + 0.844, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.162, + 0.381, + 0.784, + 0.398 + ], + "angle": 0, + "content": "Input Text: \"Please detect the edge of object in this image and output the final image.\"" + }, + { + "type": "image", + "bbox": [ + 0.145, + 0.408, + 0.33, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.33, + 0.408, + 0.461, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.461, + 0.408, + 0.649, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.654, + 0.408, + 0.845, + 0.507 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.162, + 0.509, + 0.784, + 0.525 + ], + "angle": 0, + "content": "Input Text: \"Please detect the edge of object in this image and output the final image.\"" + }, + { + "type": "image", + "bbox": [ + 0.145, + 0.526, + 0.317, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.317, + 0.525, + 0.489, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.492, + 0.526, + 0.664, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.526, + 0.839, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.161, + 0.667, + 0.784, + 0.684 + ], + "angle": 0, + "content": "Input Text: \"Please detect the edge of object in this image and output the final image.\"" + }, + { + "type": "text", 
+ "bbox": [ + 0.185, + 0.698, + 0.277, + 0.714 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.379, + 0.698, + 0.432, + 0.712 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.698, + 0.637, + 0.712 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.728, + 0.698, + 0.774, + 0.712 + ], + "angle": 0, + "content": "EDMB" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.743, + 0.87, + 0.787 + ], + "angle": 0, + "content": "Figure 50: Task: Image to X: Edge detection. Evaluate the shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and EDMB [56]. Observation: We find both GPT-4o and Gemini 2.0 Flash can detect object edges for both foreground and background objects." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "67" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.197, + 0.189, + 0.294, + 0.206 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "image", + "bbox": [ + 0.294, + 0.216, + 0.314, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.315, + 0.219, + 0.705, + 0.236 + ], + "angle": 0, + "content": "Evaluation: Image Matting, Grouping and Shape." + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.244, + 0.321, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.244, + 0.493, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.244, + 0.663, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.244, + 0.836, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.365, + 0.831, + 0.394 + ], + "angle": 0, + "content": "Input Text: \"Please Please matting the foreground and remove the background. Please directly generate the output image.\"" + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.398, + 0.321, + 0.518 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.398, + 0.48, + 0.517 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.398, + 0.664, + 0.517 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.398, + 0.834, + 0.517 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.518, + 0.833, + 0.546 + ], + "angle": 0, + "content": "Input Text: \"Please Please matting the foreground and remove the background. Please directly generate the output image.\"" + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.55, + 0.317, + 0.67 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.557, + 0.5, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.55, + 0.66, + 0.669 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.551, + 0.834, + 0.67 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.164, + 0.671, + 0.831, + 0.7 + ], + "angle": 0, + "content": "Input Text: \"Please Please matting the foreground and remove the background. 
Please directly generate the output image.\"" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.704, + 0.287, + 0.72 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.388, + 0.704, + 0.441, + 0.717 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.704, + 0.645, + 0.717 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.728, + 0.696, + 0.79, + 0.725 + ], + "angle": 0, + "content": "Matting Anything" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.75, + 0.87, + 0.82 + ], + "angle": 0, + "content": "Figure 51: Task: Image to X: Image matting. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Matting Anything [54]. Observation: Compared with Gemini, GPT-4o can handle the simple cases, as shown in the third row. However, considering the strict requirements of image matting (fine-grained and aligned details), the overall quality is poor." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "68" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.283, + 0.107 + ], + "angle": 0, + "content": "2.4.3 Salient Object" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.115, + 0.871, + 0.145 + ], + "angle": 0, + "content": "Salient Object Detection. Salient object detection is a crucial technique in the field of computer vision and image processing. It aims to identify and locate the most visually prominent objects within an image or a video sequence." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.15, + 0.869, + 0.234 + ], + "angle": 0, + "content": "In Figure 52, we adopt one expert model, BiRefNet [127], as reference. Across all examples, GPT-4o can detect relevant salient objects from the text prompts, while Gemini 2.0 Flash cannot. The second example shows that GPT-4o can generate well-aligned salient masks. However, for the other examples, the spatial locations do not match; the results appear to be generated from the input image and plausible object classes rather than from the actual object positions. In the last examples, GPT-4o cannot generate multiple salient-object masks, which is also a limitation when dealing with multiple objects." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.24, + 0.868, + 0.269 + ], + "angle": 0, + "content": "Mirror Detection. Mirror detection is a task in computer vision that focuses on identifying mirror surfaces within an image or a scene. Previous works explore this direction by adopting visual cues and geometric cues." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.274, + 0.869, + 0.358 + ], + "angle": 0, + "content": "In Figure 53, we also explore this ability for both GPT-4o and Gemini 2.0 Flash. For comparison, we adopt a recent SOTA expert model, VMD [107]. For simple cases, we find that GPT-4o can carry out mirror detection, as shown in the first example. For complex scenes, it does not work as well as the expert model, VMD. As shown in the second example, it hallucinates a mirror, producing a wrong output image with a line marking the boundary of the nonexistent mirror. As shown in the last row, GPT-4o treats several rectangular objects as mirrors, leading to several false positives."
+ }, + { + "type": "text", + "bbox": [ + 0.126, + 0.364, + 0.869, + 0.406 + ], + "angle": 0, + "content": "Shadow Detection. Shadow detection is a significant process in computer vision and image processing that aims to identify and localize shadow regions in an image or a video. This technique is crucial, as shadows can otherwise disrupt object detection, recognition, and scene analysis." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.412, + 0.869, + 0.482 + ], + "angle": 0, + "content": "In Figure 54, we compare and test this ability for GPT-4o. We adopt the SOTA model, SDDNet [21], for reference. For the simple examples (a single object or no objects in the image), both GPT-4o and Gemini can localize the shadow, as shown in the first two rows. For more complex examples, both models merge the objects and their shadows into a single mask output, as shown in the last two rows. Thus, GPT-4o cannot handle these inputs correctly. In addition, spatial misalignment occurs in all cases." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.488, + 0.869, + 0.543 + ], + "angle": 0, + "content": "Camouflage Object Detection. Camouflage object detection is a challenging task in computer vision. It aims to identify objects that are designed to blend into their backgrounds, making them difficult to distinguish by human eyes or traditional detection methods. It has wide applications in military, security, and wildlife conservation." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.549, + 0.869, + 0.62 + ], + "angle": 0, + "content": "As shown in Figure 55, we also include one expert model, BiRefNet [127], for reference. Both GPT-4o and Gemini 2.0 Flash can detect and segment the camouflaged animals in simple cases, as shown in the last two rows. GPT-4o can also detect the specific object, given the text prompt, as shown in the first row. However, the same misalignment issues still exist. In addition, GPT-4o also mixes segmentation-map formats (binary masks versus color masks), as shown in the last row." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "69" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.202, + 0.104, + 0.298, + 0.12 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "image", + "bbox": [ + 0.255, + 0.135, + 0.274, + 0.152 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.278, + 0.139, + 0.743, + 0.156 + ], + "angle": 0, + "content": "Evaluation: Salient Object Detection, Grouping and Shape." + }, + { + "type": "image", + "bbox": [ + 0.156, + 0.163, + 0.326, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.164, + 0.499, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.163, + 0.676, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.164, + 0.85, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.164, + 0.264, + 0.821, + 0.294 + ], + "angle": 0, + "content": "Input Text: \"Give me the segmentation map of the most salient objects in this image.
Return resulting image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.157, + 0.294, + 0.325, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.294, + 0.498, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.294, + 0.674, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.294, + 0.85, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.399, + 0.823, + 0.428 + ], + "angle": 0, + "content": "Input Text: \"Give me the segmentation map of the most salient objects in this image. Return resulting image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.157, + 0.429, + 0.323, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.429, + 0.498, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.429, + 0.674, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.429, + 0.85, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.164, + 0.517, + 0.821, + 0.546 + ], + "angle": 0, + "content": "Input Text: \"Give me the segmentation map of the most salient objects in this image. Return resulting image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.157, + 0.547, + 0.323, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.547, + 0.499, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.547, + 0.674, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.547, + 0.851, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.164, + 0.645, + 0.821, + 0.676 + ], + "angle": 0, + "content": "Input Text: \"Give me the segmentation map of the most salient objects in this image. Return resulting image by using image generation.\"" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.682, + 0.289, + 0.697 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.39, + 0.682, + 0.443, + 0.694 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.682, + 0.65, + 0.695 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.729, + 0.682, + 0.798, + 0.695 + ], + "angle": 0, + "content": "BiRefNet" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.728, + 0.872, + 0.784 + ], + "angle": 0, + "content": "Figure 52: Task: Image to X: Salient object detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and BiRefNet [127]. Observation: For all examples, compared with Gemini, GPT-4o can detect related salient objects with the text prompts while Gemini can not achieve this function." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "70" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.197, + 0.216, + 0.294, + 0.232 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "image", + "bbox": [ + 0.286, + 0.242, + 0.308, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.309, + 0.246, + 0.713, + 0.263 + ], + "angle": 0, + "content": "Evaluation: Mirror Detection, Grouping and Shape." + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.271, + 0.318, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.27, + 0.495, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.27, + 0.663, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.272, + 0.832, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.165, + 0.392, + 0.842, + 0.409 + ], + "angle": 0, + "content": "Input Text: \"Please segment all the mirror in the image and directly generate the output image.\"" + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.414, + 0.317, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.414, + 0.494, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.414, + 0.663, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.414, + 0.832, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.166, + 0.534, + 0.845, + 0.549 + ], + "angle": 0, + "content": "Input Text: \"Please segment all the mirror in the image and directly generate the output image.\"" + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.555, + 0.316, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.555, + 0.494, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.555, + 0.66, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.555, + 0.832, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.166, + 0.675, + 0.845, + 0.689 + ], + "angle": 0, + "content": "Input Text: \"Please segment all the mirror in the image and directly generate the output image.\"" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.696, + 0.287, + 0.711 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.388, + 0.696, + 0.441, + 0.708 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.696, + 0.645, + 0.708 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.741, + 0.695, + 0.777, + 0.707 + ], + "angle": 0, + "content": "VMD" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.737, + 0.872, + 0.793 + ], + "angle": 0, + "content": "Figure 53: Task: Image to X: Mirror detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and VMD [107]. Observation: For simple cases, we find that GPT-4o can carry out mirror detection, as shown in the first example. For the complex scene, it cannot work as well as VMD." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "71" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.108, + 0.303, + 0.124 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "title", + "bbox": [ + 0.305, + 0.141, + 0.717, + 0.158 + ], + "angle": 0, + "content": "Evaluation: Shadow Detection, Grouping and Shape." + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.162, + 0.329, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.162, + 0.501, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.162, + 0.672, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.162, + 0.844, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.249, + 0.82, + 0.291 + ], + "angle": 0, + "content": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. Return the resulting image using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.293, + 0.328, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.293, + 0.5, + 0.46 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.293, + 0.672, + 0.46 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.293, + 0.842, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.461, + 0.82, + 0.503 + ], + "angle": 0, + "content": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. Return the resulting image using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.166, + 0.505, + 0.33, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.505, + 0.5, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.505, + 0.672, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.505, + 0.842, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.673, + 0.821, + 0.715 + ], + "angle": 0, + "content": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. Return the resulting image using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.165, + 0.717, + 0.33, + 0.79 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.718, + 0.5, + 0.79 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.718, + 0.672, + 0.79 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.719, + 0.842, + 0.79 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.792, + 0.82, + 0.834 + ], + "angle": 0, + "content": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. 
Return the resulting image using image generation.\"" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.836, + 0.295, + 0.851 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.395, + 0.836, + 0.448, + 0.849 + ], + "angle": 0, + "content": "GPT-40" + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.836, + 0.648, + 0.849 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.731, + 0.836, + 0.79, + 0.848 + ], + "angle": 0, + "content": "SDDNet" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.872, + 0.872, + 0.929 + ], + "angle": 0, + "content": "Figure 54: Task: Image to X: Shadow detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SDDNet [21]. Observation: For more complex examples, both models detect both objects and their shadows with one mask output, as shown in the last two rows, leading to false positive predictions." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "72" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.202, + 0.171, + 0.298, + 0.187 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.203, + 0.256, + 0.22 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.259, + 0.207, + 0.763, + 0.223 + ], + "angle": 0, + "content": "Evaluation: Camouflage Object Detection, Grouping and Shape." + }, + { + "type": "image", + "bbox": [ + 0.156, + 0.232, + 0.325, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.232, + 0.498, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.232, + 0.672, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.232, + 0.849, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.329, + 0.809, + 0.358 + ], + "angle": 0, + "content": "Input Text: \"Give me the segmentation map of the crocodile in this image. Return resulting image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.157, + 0.363, + 0.323, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.363, + 0.498, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.363, + 0.672, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.363, + 0.849, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.466, + 0.818, + 0.496 + ], + "angle": 0, + "content": "Input Text: \"Give me the segmentation map of the fish in this image. 
Return resulting image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.157, + 0.498, + 0.323, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.498, + 0.498, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.498, + 0.672, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.498, + 0.849, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.564, + 0.818, + 0.594 + ], + "angle": 0, + "content": "Input Text: \"Give me the segmentation map of the fish in this image. Return resulting image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.157, + 0.596, + 0.324, + 0.693 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.596, + 0.5, + 0.693 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.596, + 0.672, + 0.693 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.596, + 0.849, + 0.693 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.698, + 0.821, + 0.728 + ], + "angle": 0, + "content": "Input Text: \"Give me the segmentation map of the toad in this image. Return resulting image by using image generation.\"" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.736, + 0.288, + 0.751 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.39, + 0.736, + 0.444, + 0.748 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.736, + 0.649, + 0.748 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.729, + 0.736, + 0.797, + 0.748 + ], + "angle": 0, + "content": "BiRefNet" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.784, + 0.87, + 0.84 + ], + "angle": 0, + "content": "Figure 55: Task: Image to X: Camouflage object detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and BiRefNet [127]. Observation: Both GPT-4o and Gemini 2.0 Flash can detect and segment the camouflaged animals in simple cases. However, spatial misalignment still exists." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "73" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.307, + 0.108 + ], + "angle": 0, + "content": "2.4.4 Depth Estimation" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.115, + 0.874, + 0.325 + ], + "angle": 0, + "content": "The depth estimation task involves predicting the distance from the camera to objects within a scene. In this paper, we focus on monocular depth estimation, which takes a single image as input. In Figure 56, we compare GPT-4o, Gemini 2.0 Flash, and a recent SOTA method, Depth-Anything [114]. We first notice that Gemini cannot produce reasonable depth estimations. For GPT-4o, although it can output a fancy depth map visualization, we want to point out that this output is a grayscale visualization of depth estimation and cannot be directly converted to the depth of each pixel. We mainly show five cases. In the first test case, we notice that GPT-4o is good at capturing details in images, which Depth-Anything may not be good at. Although we cannot directly determine the accuracy of the depth values, we can judge from the visualization that the depth relationships between objects are accurate. What GPT-4o cannot do well is the background. Since the background in the image is the sky, we can infer from common sense that these areas are infinitely far away from the camera. However, the depth map output of GPT-4o does not handle these areas correctly. GPT-4o performs similarly in the second, fourth, and fifth examples. Among them, we would like to emphasize the fourth test case: for buildings farther away, GPT-4o cannot effectively estimate the distance between each building and the camera. In the third example, the output of GPT-4o is very confusing: it completely misunderstands the depth relationships of the entire image. Therefore, we believe that the depth estimation performance of GPT-4o is still unstable." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "74" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.185, + 0.15, + 0.284, + 0.166 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "image", + "bbox": [ + 0.349, + 0.176, + 0.371, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.373, + 0.18, + 0.629, + 0.198 + ], + "angle": 0, + "content": "Evaluation: Depth Estimation" + }, + { + "type": "image", + "bbox": [ + 0.141, + 0.204, + 0.314, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.322, + 0.204, + 0.495, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.204, + 0.679, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.684, + 0.204, + 0.857, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.294, + 0.694, + 0.311 + ], + "angle": 0, + "content": "Input Text: \"Please generate the depth map prediction of this image.\"" + }, + { + "type": "image", + "bbox": [ + 0.141, + 0.316, + 0.314, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.322, + 0.316, + 0.493, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.316, + 0.677, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.316, + 0.856, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.406, + 0.694, + 0.422 + ], + "angle": 0, + "content": "Input Text: \"Please generate the depth map prediction of this image.\"" + }, + { + "type": "image", + "bbox": [ + 0.142, + 0.426, + 0.314, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.427, + 0.495, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.427, + 0.677, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.427, + 0.857, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.518, + 0.694, + 0.535 + ], + "angle": 0, + "content": "Input Text: \"Please generate the depth map prediction of this image.\"" + }, + { + "type": "image", + "bbox": [ + 0.142, + 0.538, + 0.314, + 0.623 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.538, + 0.494, + 0.623 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ +
0.504, + 0.538, + 0.677, + 0.622 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.538, + 0.857, + 0.623 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.624, + 0.696, + 0.64 + ], + "angle": 0, + "content": "Input Text: \"Please generate the depth map prediction of this image.\"" + }, + { + "type": "image", + "bbox": [ + 0.142, + 0.644, + 0.315, + 0.733 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.645, + 0.494, + 0.733 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.645, + 0.677, + 0.733 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.645, + 0.856, + 0.733 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.734, + 0.698, + 0.751 + ], + "angle": 0, + "content": "Input Text: \"Please generate the depth map prediction of this image.\"" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.758, + 0.28, + 0.775 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.761, + 0.441, + 0.775 + ], + "angle": 0, + "content": "GPT-40" + }, + { + "type": "text", + "bbox": [ + 0.526, + 0.761, + 0.653, + 0.776 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.709, + 0.761, + 0.834, + 0.777 + ], + "angle": 0, + "content": "Depth-Anything" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.791, + 0.871, + 0.862 + ], + "angle": 0, + "content": "Figure 56: Task: Image to X: Depth estimation. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Depth-Anything [114]. Observation: We convert the depth map generated by Depth-Anything into a visualization map similar to GPT-4o. This evaluation shows that GPT-4o has the capability of distinguishing the depth relationship of different parts in the image, but its understanding of the background is insufficient." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "75" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.317, + 0.105 + ], + "angle": 0, + "content": "2.4.5 Normal Estimation" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.115, + 0.871, + 0.228 + ], + "angle": 0, + "content": "The surface normal estimation task involves predicting the orientation of surfaces at each pixel in an image, typically represented as 3D vectors. In Figure 57, we compare GPT-4o, Gemini 2.0 Flash, and Marigold normals [48]. The results show that GPT-4o can generate reasonable results. However, since GPT-4o's output is an appealing normal map visualization, we want to clarify that this output is a color-coded visualization and does not directly provide the exact normal vector for each pixel. Thus, we cannot use lighting or other methods to verify the accuracy of the normal maps, and downstream tasks cannot use the output results. However, we also find some unreasonable details. In the third test case, common sense suggests that the ground should be flat, but GPT-4o predicts normals for these textured areas that differ from the surrounding areas." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.253, + 0.312, + 0.27 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "title", + "bbox": [ + 0.377, + 0.284, + 0.65, + 0.3 + ], + "angle": 0, + "content": "Evaluation: Consistency/accuracy." 
+ }, + { + "type": "image", + "bbox": [ + 0.147, + 0.302, + 0.307, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.439, + 0.308, + 0.611 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.613, + 0.306, + 0.761 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.302, + 0.482, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.439, + 0.482, + 0.611 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.326, + 0.613, + 0.482, + 0.762 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.302, + 0.658, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.44, + 0.658, + 0.611 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.613, + 0.658, + 0.762 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.301, + 0.837, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.44, + 0.837, + 0.611 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.613, + 0.837, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.267, + 0.767, + 0.721, + 0.782 + ], + "angle": 0, + "content": "Input Text: \"Generate the surface normal map of this picture.\"" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.793, + 0.277, + 0.809 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.366, + 0.793, + 0.42, + 0.807 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.793, + 0.627, + 0.807 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.728, + 0.793, + 0.793, + 0.809 + ], + "angle": 0, + "content": "Marigold" + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.835, + 0.871, + 0.892 + ], + "angle": 0, + "content": "Figure 57: Task: Image to X: Normal estimation. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Marigold [48]. Observation: This evaluation shows that GPT-4o has the capability of generating a visualization map of the surface normal, but the understanding of the details is still insufficient." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "76" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.303, + 0.106 + ], + "angle": 0, + "content": "2.4.6 Layout Detection" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.115, + 0.871, + 0.199 + ], + "angle": 0, + "content": "The layout detection task requires the model to identify structural components (e.g., titles, paragraphs, tables, images) in the given image. In Figure 58, we compare the performance of GPT-4o, Gemini 2.0 Flash, and LayoutLMV3 [44] on the layout detection task. In the test cases, GPT-4o hallucinates layout elements that do not exist; the final output is a different document with \"layout detection\" results drawn on it. For use in downstream tasks, such results are meaningless. Therefore, we conclude that GPT-4o is not capable of the layout detection task."
+ }, + { + "type": "title", + "bbox": [ + 0.192, + 0.221, + 0.29, + 0.237 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.248, + 0.374, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.377, + 0.252, + 0.644, + 0.267 + ], + "angle": 0, + "content": "Evaluation: Document Detection." + }, + { + "type": "image", + "bbox": [ + 0.148, + 0.277, + 0.318, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.334, + 0.277, + 0.465, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.48, + 0.277, + 0.642, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.655, + 0.277, + 0.848, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.419, + 0.858, + 0.435 + ], + "angle": 0, + "content": "Input Text: \"Generate a new image which contains the layout detection results of the input image.\"" + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.451, + 0.313, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.45, + 0.482, + 0.529 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.485, + 0.45, + 0.665, + 0.53 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.45, + 0.853, + 0.529 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.534, + 0.857, + 0.55 + ], + "angle": 0, + "content": "Input Text: \"Generate a new image which contains the layout detection results of the input image.\"" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.559, + 0.272, + 0.574 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.559, + 0.429, + 0.571 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.559, + 0.636, + 0.571 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.714, + 0.559, + 0.804, + 0.573 + ], + "angle": 0, + "content": "LayoutLMV3" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.595, + 0.871, + 0.638 + ], + "angle": 0, + "content": "Figure 58: Task: Image to X: Layout detection. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and LayoutLMV3 [44]. Observation: The results show that GPT-4o and Gemini frequently generate a different document, even though it carries seemingly correct layout-detection results." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "77" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.283, + 0.106 + ], + "angle": 0, + "content": "2.4.7 Text Detection" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.115, + 0.87, + 0.158 + ], + "angle": 0, + "content": "The text detection task requires the model to detect the text in the given image. In Figure 59, we compare the performance of GPT-4o, Gemini 2.0 Flash [99], and CRAFT [3] on the text detection task. We observe that CRAFT exhibits better performance than the other models." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.164, + 0.87, + 0.234 + ], + "angle": 0, + "content": "In the first test case, GPT-4o demonstrates comparable performance to CRAFT. However, in other cases, GPT-4o repeatedly generates nonexistent text and labels it as \"text area\".
This issue becomes particularly evident in cluttered scenes or images with complex backgrounds. These false positives not only reduce detection precision but also make the output less reliable for downstream tasks such as OCR or document understanding. On the other hand, Gemini does not generate nonexistent texts but tends to over-predict some areas as text areas." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "78" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.197, + 0.137, + 0.294, + 0.155 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "image", + "bbox": [ + 0.381, + 0.162, + 0.401, + 0.179 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.404, + 0.166, + 0.62, + 0.182 + ], + "angle": 0, + "content": "Evaluation: Text Detection." + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.19, + 0.321, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.322, + 0.191, + 0.51, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.192, + 0.681, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.192, + 0.849, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.299, + 0.823, + 0.316 + ], + "angle": 0, + "content": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"" + }, + { + "type": "image", + "bbox": [ + 0.155, + 0.333, + 0.331, + 0.411 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.333, + 0.487, + 0.411 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.488, + 0.333, + 0.664, + 0.411 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.334, + 0.846, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.415, + 0.822, + 0.431 + ], + "angle": 0, + "content": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"" + }, + { + "type": "image", + "bbox": [ + 0.155, + 0.439, + 0.328, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.329, + 0.439, + 0.504, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.439, + 0.676, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.439, + 0.852, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.627, + 0.823, + 0.644 + ], + "angle": 0, + "content": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"" + }, + { + "type": "image", + "bbox": [ + 0.155, + 0.655, + 0.348, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.669, + 0.483, + 0.745 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.489, + 0.655, + 0.67, + 0.757 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.656, + 0.845, + 0.757 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.77, + 0.823, + 0.786 + ], + "angle": 0, + "content": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.792, + 0.279, + 0.808 + ], + "angle": 0, + 
"content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.38, + 0.792, + 0.434, + 0.806 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.792, + 0.639, + 0.806 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.728, + 0.792, + 0.78, + 0.806 + ], + "angle": 0, + "content": "CRAFT" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.83, + 0.871, + 0.873 + ], + "angle": 0, + "content": "Figure 59: Task: Image to X: Text detection. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and CRAFT [3]. Observation: The results show that GPT-4o frequently generates text that does not exist." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "79" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.092, + 0.296, + 0.108 + ], + "angle": 0, + "content": "2.4.8 Object Tracking" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.115, + 0.872, + 0.283 + ], + "angle": 0, + "content": "The object tracking task requires the model to continuously locate and follow the specific object across the frames in a video sequence. We test the multi-object tracking, which requires the model to track several objects concurrently. We test four cases (Figure 60, 61, 62, 63). We compare GPT-4o, Gemini 2.0 Flash, and a recent SOTA method SAM-2 [86]. Our first observation is that GPT-4o seems unable to generate images that are consistent with the original image. This may be related to the nature of its generative model. Even if we ignore this, for the tracking task, SAM-2 still performs better, while GPT-4o will have problems such as failing to maintain consistent tracking of the target, frequently drifting, or losing the object entirely. In Figure 60, the output of GPT-4o generally demonstrates the ability to track objects, but there are also some defects. For example, a new object is even created out of the existing objects in the last picture generated by GPT-4o. We speculate that this is caused by the influence of the conversation context. In Figure 61, GPT-4o outputs some content that should not be in the output, such as the \"caf\" tag. In Figure 62, GPT-4o can track a relatively simple object, but it fuses two separate objects. In Figure 63, GPT-4o lacks the capability of tracking in the dense scenario." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "80" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.199, + 0.1, + 0.296, + 0.116 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "title", + "bbox": [ + 0.252, + 0.131, + 0.741, + 0.151 + ], + "angle": 0, + "content": "Evaluation: Object Tracking, Matching and Video Analysis." + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.156, + 0.318, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.157, + 0.49, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.157, + 0.666, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.157, + 0.839, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.279, + 0.827, + 0.323 + ], + "angle": 0, + "content": "Input Text: \"This is the first frame of a video where I've marked four targets with different colored bounding boxes. 
I'll subsequently provide you with other frames from this video for object tracking of these four targets. Understood?\"" + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.328, + 0.321, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.328, + 0.491, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.328, + 0.666, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.328, + 0.839, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.452, + 0.808, + 0.496 + ], + "angle": 0, + "content": "Input Text: \"You now need to perform object tracking on the four targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.499, + 0.32, + 0.62 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.499, + 0.49, + 0.62 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.499, + 0.666, + 0.62 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.5, + 0.839, + 0.62 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.624, + 0.82, + 0.668 + ], + "angle": 0, + "content": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.672, + 0.32, + 0.792 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.672, + 0.49, + 0.792 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.672, + 0.666, + 0.792 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.672, + 0.839, + 0.792 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.801, + 0.82, + 0.845 + ], + "angle": 0, + "content": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.851, + 0.284, + 0.867 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.384, + 0.851, + 0.437, + 0.864 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.851, + 0.646, + 0.864 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.746, + 0.851, + 0.791, + 0.863 + ], + "angle": 0, + "content": "SAM2" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.89, + 0.871, + 0.947 + ], + "angle": 0, + "content": "Figure 60: Task: Image to X: Object tracking, matching, and video analysis (1/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "81" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.199, + 0.184, + 0.296, + 0.201 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "title", + "bbox": [ + 0.252, + 0.213, + 0.742, + 0.232 + ], + "angle": 0, + "content": "Evaluation: Object Tracking, Matching and Video Analysis." + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.242, + 0.321, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.242, + 0.493, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.242, + 0.669, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.68, + 0.242, + 0.842, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.317, + 0.813, + 0.36 + ], + "angle": 0, + "content": "Input Text: \"This is the first frame of a video where I've marked three targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these three targets. Understood?\"" + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.364, + 0.321, + 0.435 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.364, + 0.49, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.364, + 0.667, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.681, + 0.364, + 0.84, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.437, + 0.816, + 0.48 + ], + "angle": 0, + "content": "Input Text: \"You now need to perform object tracking on the three targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.484, + 0.321, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.484, + 0.49, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.484, + 0.667, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.681, + 0.484, + 0.84, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.557, + 0.82, + 0.601 + ], + "angle": 0, + "content": "Input Text: \"Continue tracking the three targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.604, + 0.321, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.604, + 0.49, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.604, + 0.667, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.681, + 0.604, + 0.838, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.679, + 0.82, + 0.724 + ], + "angle": 0, + "content": "Input Text: \"Continue tracking the three targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.73, + 0.286, + 0.746 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.384, + 0.73, + 0.437, + 0.743 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.73, + 0.645, + 0.743 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.736, + 0.73, + 0.782, + 0.743 + ], + "angle": 0, + "content": "SAM2" + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.767, + 0.872, + 0.825 + ], + "angle": 0, + "content": "Figure 61: Task: Image to X: Object tracking, matching, and video analysis (2/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "82" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.198, + 0.221, + 0.295, + 0.237 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "image", + "bbox": [ + 0.252, + 0.248, + 0.272, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.274, + 0.252, + 0.74, + 0.269 + ], + "angle": 0, + "content": "Evaluation: Object Tracking, Matching and Video Analysis." + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.275, + 0.323, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.275, + 0.495, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.275, + 0.666, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.275, + 0.839, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.332, + 0.826, + 0.375 + ], + "angle": 0, + "content": "Input Text: \"This is the first frame of a video where I've marked four targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these four targets. Understood?\"" + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.379, + 0.323, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.379, + 0.495, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.379, + 0.666, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.379, + 0.839, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.436, + 0.81, + 0.479 + ], + "angle": 0, + "content": "Input Text: \"You now need to perform object tracking on the four targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.481, + 0.323, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.481, + 0.495, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.481, + 0.668, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.48, + 0.839, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.537, + 0.82, + 0.581 + ], + "angle": 0, + "content": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.584, + 0.323, + 0.639 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.584, + 0.495, + 0.639 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.584, + 0.668, + 0.639 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.584, + 0.839, + 0.639 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.643, + 0.82, + 0.687 + ], + "angle": 0, + "content": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.693, + 0.29, + 0.708 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.394, + 0.693, + 0.446, + 0.706 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.693, + 0.644, + 0.706 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.738, + 0.694, + 0.783, + 0.706 + ], + "angle": 0, + "content": "SAM2" + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.729, + 0.871, + 0.786 + ], + "angle": 0, + "content": "Figure 62: Task: Image to X: Object tracking, matching, and video analysis (3/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "83" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.199, + 0.189, + 0.295, + 0.206 + ], + "angle": 0, + "content": "Image-to-X" + }, + { + "type": "title", + "bbox": [ + 0.252, + 0.215, + 0.743, + 0.234 + ], + "angle": 0, + "content": "Evaluation: Object Tracking, Matching and Video Analysis." 
+ }, + { + "type": "image", + "bbox": [ + 0.16, + 0.242, + 0.323, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.242, + 0.495, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.242, + 0.667, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.242, + 0.839, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.316, + 0.816, + 0.358 + ], + "angle": 0, + "content": "Input Text: \"This is the first frame of a video where I've marked six targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these six targets. Understood?\"" + }, + { + "type": "image", + "bbox": [ + 0.161, + 0.361, + 0.323, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.361, + 0.495, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.361, + 0.666, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.361, + 0.838, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.434, + 0.81, + 0.477 + ], + "angle": 0, + "content": "Input Text: \"You now need to perform object tracking on the six targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.161, + 0.481, + 0.323, + 0.553 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.481, + 0.495, + 0.553 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.481, + 0.667, + 0.553 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.481, + 0.838, + 0.553 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.555, + 0.82, + 0.599 + ], + "angle": 0, + "content": "Input Text: \"Continue tracking the six targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.602, + 0.323, + 0.674 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.602, + 0.495, + 0.674 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.602, + 0.667, + 0.674 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.602, + 0.838, + 0.674 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.675, + 0.82, + 0.718 + ], + "angle": 0, + "content": "Input Text: \"Continue tracking the six targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.725, + 0.292, + 0.74 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "text", + "bbox": [ + 0.387, + 0.725, + 0.44, + 0.738 + ], + "angle": 0, + "content": "GPT 40" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.725, + 0.645, + 0.738 + ], + "angle": 0, + "content": "Gemini 2.0 Flash" + }, + { + "type": "text", + "bbox": [ + 0.734, + 0.725, + 0.78, + 0.738 + ], + "angle": 0, + "content": "SAM2" + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.763, + 0.871, + 0.819 + ], + "angle": 0, + "content": "Figure 63: Task: Image to X: Object tracking, matching, and video analysis (4/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "84" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.09, + 0.26, + 0.106 + ], + "angle": 0, + "content": "3 Limitations" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.122, + 0.87, + 0.166 + ], + "angle": 0, + "content": "Although GPT-4o demonstrates impressive capabilities across a wide range of image generation tasks, several limitations remain. These challenges highlight key areas for future improvement in developing unified foundation models for vision-language generation." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.182, + 0.338, + 0.196 + ], + "angle": 0, + "content": "3.1 Inconsistent Generation" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.208, + 0.869, + 0.306 + ], + "angle": 0, + "content": "While GPT-4o often produces high-quality and semantically relevant images conditioned on textual prompts, it occasionally exhibits inconsistencies. Specifically, the model may generate visually compelling outputs that deviate from precise semantic cues of the input image, such as object count, spatial layout, specific shapes, or designated colors. These inconsistencies are especially problematic in tasks requiring partial image editing or compositional accuracy. Notably, such issues are less common in diffusion-based models or discrete denoising architectures like MaskGIT [11, 6], suggesting that GPT-4o operates under a distinct generative paradigm with inherent trade-offs in fidelity and control." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.323, + 0.266, + 0.337 + ], + "angle": 0, + "content": "3.2 Hallucination" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.349, + 0.869, + 0.435 + ], + "angle": 0, + "content": "GPT-4o is also susceptible to hallucinations—producing content that is logically implausible, semantically inconsistent, or factually incorrect. These include fabricating non-existent objects or geographical features (e.g., imaginary islands or landmarks), and misrepresenting relationships between entities. Such errors are particularly prevalent in complex or underspecified prompts, where the model appears to rely on internal priors rather than grounded world knowledge. While hallucination is a common challenge across generative models, it poses notable limitations for real-world applications demanding precision, such as education, medical illustration, or scientific visualization." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.451, + 0.239, + 0.464 + ], + "angle": 0, + "content": "3.3 Data Bias" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.476, + 0.87, + 0.63 + ], + "angle": 0, + "content": "Despite strong alignment between text and vision modalities, GPT-4o struggles with data bias issue, which fail in generating underrepresented cultural elements and rendering non-Latin scripts such as Chinese, Japanese, and Arabic. The generated characters are often incomplete, distorted, or replaced with Latin-like approximations. These artifacts reflect underlying challenges in multilingual representation, likely due to limited exposure to diverse scripts during training and the inherent difficulty of accurate typographic rendering in pixel space. This phenomenon is emblematic of a larger issue in AI systems—data bias. The training data used to develop models like GPT-4o may disproportionately represent certain languages, cultures, and writing systems, leading to disparities in performance across different linguistic groups. These biases are not only technical limitations but also ethical concerns, as they can contribute to the exclusion of underrepresented languages and cultures from AI applications. As vision-language models are increasingly deployed globally, improving support for multilingual text remains a crucial step toward inclusive and culturally competent AI systems." + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.651, + 0.257, + 0.667 + ], + "angle": 0, + "content": "4 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.683, + 0.869, + 0.809 + ], + "angle": 0, + "content": "In conclusion, this work presents a comprehensive study on the development of unified vision-language generative models, with a focus on evaluating GPT-4o across a wide range of image generation tasks. Our analysis shows that GPT-4o demonstrates strong capabilities in aligning vision and language, achieving competitive results across text-to-image, image-to-image, image-to-3D, and image-to-X tasks. However, limitations remain in inconsistent generation, hallucination, and data bias in underrepresented cultural elements and non-Latin scripts, highlighting current trade-offs in model design and training data coverage. We also emphasize that architecture alone does not determine success; training data, model scale, and optimization strategies are equally critical components of progress. We hope future work will provide deeper empirical insights into such proprietary systems and clarify their position within the broader landscape of unified generative modeling." + }, + { + "type": "title", + "bbox": [ + 0.129, + 0.827, + 0.225, + 0.843 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.854, + 0.868, + 0.894 + ], + "angle": 0, + "content": "[1] Hao Ai, Zidong Cao, Haonan Lu, Chen Chen, Jian Ma, Pengyuan Zhou, Tae-Kyun Kim, Pan Hui, and Lin Wang. Dream360: Diverse and immersive outdoor virtual scene creation via transformer-based 360 image outpainting. IEEE transactions on visualization and computer graphics, 2024. 34, 42" + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.898, + 0.581, + 0.912 + ], + "angle": 0, + "content": "[2] Ideogram AI. Ideogram. https://ideogram.ai/, 2024. 
10, 11, 12" + }, + { + "type": "list", + "bbox": [ + 0.142, + 0.854, + 0.868, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "85" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.092, + 0.87, + 0.119 + ], + "angle": 0, + "content": "[3] Youngmin Baek, Bado Lee, Dongyoon Han, Sangdoo Yun, and Hwalsuk Lee. Character region awareness for text detection. In CVPR, 2019. 78, 79" + }, + { + "type": "ref_text", + "bbox": [ + 0.141, + 0.122, + 0.871, + 0.15 + ], + "angle": 0, + "content": "[4] Jinbin Bai, Wei Chow, Ling Yang, Xiangtai Li, Juncheng Li, Hanwang Zhang, and Shuicheng Yan. Humanedit: A high-quality human-rewarded dataset for instruction-based image editing. arXiv preprint arXiv:2412.04280, 2024. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.152, + 0.871, + 0.18 + ], + "angle": 0, + "content": "[5] Jinbin Bai, Zhen Dong, Aosong Feng, Xiao Zhang, Tian Ye, Kaicheng Zhou, and Mike Zheng Shou. Integrating view conditions for image synthesis. arXiv preprint arXiv:2310.16002, 2023. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.183, + 0.872, + 0.222 + ], + "angle": 0, + "content": "[6] Jinbin Bai, Tian Ye, Wei Chow, Enxin Song, Qing-Guo Chen, Xiangtai Li, Zhen Dong, Lei Zhu, and Shuicheng Yan. Meissonic: Revitalizing masked generative transformers for efficient high-resolution text-to-image synthesis. arXiv preprint arXiv:2410.08261, 2024. 5, 85" + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.224, + 0.799, + 0.24 + ], + "angle": 0, + "content": "[7] Shane Barratt and Rishi Sharma. A note on the inception score. arXiv preprint arXiv:1801.01973, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.243, + 0.871, + 0.282 + ], + "angle": 0, + "content": "[8] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2023.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.285, + 0.871, + 0.313 + ], + "angle": 0, + "content": "[9] Manuel Brack, Felix Friedrich, Katharina Kornmeier, Linoy Tsaban, Patrick Schramowski, Kristian Kersting, and Apolinário Passos. *Ledits++: Limitless image editing using text-to-image models.* 2023. 21, 25" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.316, + 0.871, + 0.343 + ], + "angle": 0, + "content": "[10] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. arXiv preprint arXiv:2211.09800, 2022. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.346, + 0.871, + 0.373 + ], + "angle": 0, + "content": "[11] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11315-11325, 2022. 85" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.376, + 0.871, + 0.403 + ], + "angle": 0, + "content": "[12] Haoyu Chen, Xiaojie Xu, Wenbo Li, Jingjing Ren, Tian Ye, Songhua Liu, Ying-Cong Chen, Lei Zhu, and Xinchao Wang. Posta: A go-to framework for customized artistic poster generation. arXiv preprint arXiv:2503.14908, 2025. 
10, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.406, + 0.871, + 0.445 + ], + "angle": 0, + "content": "[13] Liang Chen, Shuai Bai, Wenhao Chai, Weichu Xie, Haozhe Zhao, Leon Vinci, Junyang Lin, and Baobao Chang. Multimodal representation alignment for image generation: Text-image interleaved control is easier than you think. arXiv preprint arXiv:2502.20172, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.449, + 0.871, + 0.476 + ], + "angle": 0, + "content": "[14] Liang-Chieh Chen, George Papandreou, Florian Schroff, and Hartwig Adam. Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587, 2017. 62, 64" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.479, + 0.871, + 0.518 + ], + "angle": 0, + "content": "[15] Sixiang Chen, Tian Ye, Jinbin Bai, Erkang Chen, Jun Shi, and Lei Zhu. Sparse sampling transformer with uncertainty-driven ranking for unified removal of raindrops and rain streaks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13106-13117, 2023. 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.522, + 0.871, + 0.549 + ], + "angle": 0, + "content": "[16] Sixiang Chen, Tian Ye, Yun Liu, and Erkang Chen. Snowformer: Context interaction transformer with scale-awareness for single image desnowing. arXiv preprint arXiv:2208.09703, 2022. 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.552, + 0.871, + 0.591 + ], + "angle": 0, + "content": "[17] Sixiang Chen, Tian Ye, Kai Zhang, Zhaohu Xing, Yunlong Lin, and Lei Zhu. Teaching tailored to talent: Adverse weather restoration via prompt pool and depth-anything constraint. In European Conference on Computer Vision, pages 95–115. Springer, 2024. 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.595, + 0.871, + 0.633 + ], + "angle": 0, + "content": "[18] Tianqi Chen, Yongfei Liu, Zhendong Wang, Jianbo Yuan, Quanzeng You, Hongxia Yang, and Mingyuan Zhou. Improving in-context learning in diffusion models with visual context-modulated prompts. arXiv preprint arXiv:2312.01408, 2023. 56" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.637, + 0.871, + 0.676 + ], + "angle": 0, + "content": "[19] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.68, + 0.871, + 0.706 + ], + "angle": 0, + "content": "[20] Marcos V. Conde, Gregor Geigle, and Radu Timofte. Instructir: High-quality image restoration following human instructions. In ECCV, 2024. 34, 35, 36, 37, 38, 39, 40" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.71, + 0.871, + 0.737 + ], + "angle": 0, + "content": "[21] Runmin Cong, Yuchen Guan, Jinpeng Chen, Wei Zhang, Yao Zhao, and Sam Kwong. Sddnet: Style-guided dual-layer disentanglement network for shadow detection. In ACM MM, 2023. 69, 72" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.74, + 0.871, + 0.766 + ], + "angle": 0, + "content": "[22] Ciprian Corneanu, Raghudeep Gadde, and Aleix M Martinez. Latentpaint: Image inpainting in latent space with diffusion models. In WACV, 2024. 34, 41" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.77, + 0.871, + 0.796 + ], + "angle": 0, + "content": "[23] Yingying Deng, Fan Tang, Weiming Dong, Chongyang Ma, Xingjia Pan, Lei Wang, and Changsheng Xu. Stytr2: Image style transfer with transformers. 
In CVPR, 2022. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.8, + 0.871, + 0.839 + ], + "angle": 0, + "content": "[24] Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamlmm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.843, + 0.871, + 0.87 + ], + "angle": 0, + "content": "[25] Wei Dong, Han Zhou, Yuqiong Tian, Jingke Sun, Xiaohong Liu, Guangtao Zhai, and Jun Chen. Shadowrefiner: Towards mask-free shadow removal via fast fourier transformer. arXiv preprint arXiv:2406.02559. 44" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.873, + 0.871, + 0.912 + ], + "angle": 0, + "content": "[26] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 1" + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.092, + 0.872, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "86" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.136, + 0.092, + 0.871, + 0.133 + ], + "angle": 0, + "content": "[27] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first international conference on machine learning*, 2024. 10, 11, 47, 51" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.134, + 0.872, + 0.176 + ], + "angle": 0, + "content": "[28] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 12873-12883, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.179, + 0.871, + 0.207 + ], + "angle": 0, + "content": "[29] Aosong Feng, Weikang Qiu, Jinbin Bai, Kaicheng Zhou, Zhen Dong, Xiao Zhang, Rex Ying, and Leandros Tassiulas. An item is worth a prompt: Versatile image editing with disentangled control. arXiv preprint arXiv:2403.04880, 2024. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.209, + 0.871, + 0.236 + ], + "angle": 0, + "content": "[30] Tsu-Jui Fu, Wenze Hu, Xianzhi Du, William Yang Wang, Yinfei Yang, and Zhe Gan. Guiding instruction-based image editing via multimodal large language models. ICLR, 2024. 21, 22, 23, 24" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.239, + 0.871, + 0.267 + ], + "angle": 0, + "content": "[31] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. ICLR, 2023. 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.27, + 0.871, + 0.298 + ], + "angle": 0, + "content": "[32] Jun Gao, Tianchang Shen, Zian Wang, Wenzheng Chen, Kangxue Yin, Daiqing Li, Or Litany, Zan Gojcic, and Sanja Fidler. Get3d: A generative model of high quality 3d textured shapes learned from images. NeurIPS, 2022. 
58" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.3, + 0.871, + 0.327 + ], + "angle": 0, + "content": "[33] Leon A Gatys, Alexander S Ecker, and Matthias Bethge. Image style transfer using convolutional neural networks. CVPR, 2016. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.33, + 0.872, + 0.37 + ], + "angle": 0, + "content": "[34] Yuying Ge, Sijie Zhao, Jinguo Zhu, Yixiao Ge, Kun Yi, Lin Song, Chen Li, Xiaohan Ding, and Ying Shan. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024.1" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.373, + 0.871, + 0.402 + ], + "angle": 0, + "content": "[35] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 63(11):139–144, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.404, + 0.871, + 0.444 + ], + "angle": 0, + "content": "[36] Yuchao Gu, Xintao Wang, Jay Zhangjie Wu, Yujun Shi, Yunpeng Chen, Zihan Fan, Wuyou Xiao, Rui Zhao, Shuning Chang, Weijia Wu, et al. Mix-of-show: Decentralized low-rank adaptation for multi-concept customization of diffusion models. In NeurIPS, 2024. 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.447, + 0.871, + 0.475 + ], + "angle": 0, + "content": "[37] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. NeurIPS, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.477, + 0.871, + 0.506 + ], + "angle": 0, + "content": "[38] Qibin Hou, Yuying Ge, Jing Zhang, Yuchao Dai, and Ming-Ming Cheng. Storydiffusion: Consistent self-attention for long-range image and video generation. In Advances in Neural Information Processing Systems (NeurIPS), 2024. 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.508, + 0.871, + 0.536 + ], + "angle": 0, + "content": "[39] Qiming Hu, Hainuo Wang, and Xiaojie Guo. Single image reflection separation via dual-stream interactive transformers. Advances in Neural Information Processing Systems, 37:55228-55248, 2024. 45" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.538, + 0.871, + 0.567 + ], + "angle": 0, + "content": "[40] Jiancheng Huang, Yi Huang, Jianzhuang Liu, Donghao Zhou, Yifan Liu, and Shifeng Chen. Dual-schedule inversion: Training-and tuning-free inversion for real image editing. arXiv preprint arXiv:2412.11152, 2024. 21" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.568, + 0.871, + 0.609 + ], + "angle": 0, + "content": "[41] Kaiyi Huang, Chengqi Duan, Kaiyue Sun, Enze Xie, Zhenguo Li, and Xihui Liu. T2i-compbench++: An enhanced and comprehensive benchmark for compositional text-to-image generation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2025. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.611, + 0.871, + 0.64 + ], + "angle": 0, + "content": "[42] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Yupeng Shi, Huanzhang Dou, Chen Liang, Yutong Feng, Yu Liu, and Jingren Zhou. In-context lora for diffusion transformers. arXiv preprint arXiv:2410.23775, 2024. 56" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.642, + 0.871, + 0.669 + ], + "angle": 0, + "content": "[43] Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In ICCV, 2017. 
18" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.672, + 0.871, + 0.702 + ], + "angle": 0, + "content": "[44] Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, and Furu Wei. Layoutlmv3: Pre-training for document ai with unified text and image masking. In ACM MM, 2022. 77" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.704, + 0.871, + 0.731 + ], + "angle": 0, + "content": "[45] Zixuan Huang, Stefan Stojanov, Anh Thai, Varun Jampani, and James M Rehg. Planes vs. chairs: Category-guided 3d shape learning without any 3d cues. In ECCV, 2022. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.733, + 0.871, + 0.761 + ], + "angle": 0, + "content": "[46] Jiaxiu Jiang, Yabo Zhang, Kailai Feng, Xiaohe Wu, Wenbo Li, Renjing Pei, Fan Li, and Wangmeng Zuo. Mc2: Multi-concept guidance for customized multi-concept generation. arXiv preprint arXiv:2404.05268, 2024. 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.764, + 0.871, + 0.791 + ], + "angle": 0, + "content": "[47] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In ECCV, 2016. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.794, + 0.871, + 0.835 + ], + "angle": 0, + "content": "[48] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 76" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.837, + 0.871, + 0.865 + ], + "angle": 0, + "content": "[49] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In ICCV, 2023. 56" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.868, + 0.871, + 0.896 + ], + "angle": 0, + "content": "[50] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In CVPR, 2023. 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.898, + 0.871, + 0.914 + ], + "angle": 0, + "content": "[51] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024. 2, 5, 8, 9, 10, 11, 47, 48, 49" + }, + { + "type": "list", + "bbox": [ + 0.136, + 0.092, + 0.872, + 0.914 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "87" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.136, + 0.092, + 0.871, + 0.132 + ], + "angle": 0, + "content": "[52] Bolin Lai, Felix Juefei-Xu, Miao Liu, Xiaoliang Dai, Nikhil Mehta, Chenguang Zhu, Zeyi Huang, James M Rehg, Sang-min Lee, Ning Zhang, et al. Unleashing in-context learning of autoregressive models for few-shot image manipulation. arXiv preprint arXiv:2412.01027, 2024. 56" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.134, + 0.714, + 0.15 + ], + "angle": 0, + "content": "[53] Jiachen Li, Jitesh Jain, and Humphrey Shi. Matting anything. arXiv: 2306.05399, 2023. 66" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.152, + 0.87, + 0.18 + ], + "angle": 0, + "content": "[54] Jiachen Li, Jitesh Jain, and Humphrey Shi. Matting anything. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1775–1785, 2024. 
68" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.182, + 0.871, + 0.222 + ], + "angle": 0, + "content": "[55] Junyi Li, Zhilu Zhang, Xiaoyu Liu, Chaoyu Feng, Xiaotao Wang, Lei Lei, and Wangmeng Zuo. Spatially adaptive self-supervised learning for real-world image denoising. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2023. 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.224, + 0.871, + 0.252 + ], + "angle": 0, + "content": "[56] Yachuan Li, Xavier Soria Poma, Yun Bai, Qian Xiao, Chaozhi Yang, Guanlin Li, and Zongmin Li. Edmb: Edge detector with mamba. arXiv preprint arXiv:2501.04846, 2025. 66, 67" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.254, + 0.871, + 0.281 + ], + "angle": 0, + "content": "[57] Yijun Li, Chen Fang, Jimei Yang, Zhaowen Wang, Xin Lu, and Ming-Hsuan Yang. Universal style transfer via feature transforms. In NIPS, 2017. 18" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.285, + 0.871, + 0.312 + ], + "angle": 0, + "content": "[58] Zijie Li, Henry Li, Yichun Shi, Amir Barati Farimani, Yuval Kluger, Linjie Yang, and Peng Wang. Dual diffusion for unified image generation and understanding. arXiv preprint arXiv:2501.00289, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.314, + 0.871, + 0.343 + ], + "angle": 0, + "content": "[59] Zhexin Liang, Zhaochen Li, Shangchen Zhou, Chongyi Li, and Chen Change Loy. Control color: Multimodal diffusion-based interactive image colorization. arXiv preprint arXiv:2402.10855, 2024. 34, 43" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.345, + 0.871, + 0.373 + ], + "angle": 0, + "content": "[60] Xin Lin, Chao Ren, Kelvin CK Chan, Lu Qi, Jinshan Pan, and Ming-Hsuan Yang. Multi-task image restoration guided by robust dino features. arXiv preprint arXiv:2312.01677, 2023. 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.375, + 0.871, + 0.403 + ], + "angle": 0, + "content": "[61] Xin Lin, Chao Ren, and Xiao Liu. Unsupervised image denoising in real-world scenarios via self-collaboration parallel generative adversarial branches. In ICCV, 2023. 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.406, + 0.871, + 0.444 + ], + "angle": 0, + "content": "[62] Xin Lin, Jingtong Yue, Sixian Ding, Chao Ren, Lu Qi, and Ming-Hsuan Yang. Dual degradation representation for joint deraining and low-light enhancement in the dark. IEEE Transactions on Circuits and Systems for Video Technology, 2024. 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.447, + 0.871, + 0.476 + ], + "angle": 0, + "content": "[63] Xin Lin, Yuyan Zhou, Jingtong Yue, Chao Ren, Kelvin CK Chan, Lu Qi, and Ming-Hsuan Yang. Re-boosting self-collaboration parallel prompt gan for unsupervised image restoration. arXiv preprint arXiv:2408.09241, 2024. 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.479, + 0.871, + 0.518 + ], + "angle": 0, + "content": "[64] Bingchen Liu, Ehsan Akhgari, Alexander Visheratin, Aleks Kamko, Linmiao Xu, Shivam Shrirao, Chase Lambert, Joao Souza, Suhail Doshi, and Daiqing Li. Playground v3: Improving text-to-image alignment with deep-fusion large language models. arXiv preprint arXiv:2409.10695, 2024. 10, 12, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.521, + 0.871, + 0.549 + ], + "angle": 0, + "content": "[65] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining, 2024. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.551, + 0.871, + 0.591 + ], + "angle": 0, + "content": "[66] Haipeng Liu, Yang Wang, Biao Qian, Meng Wang, and Yong Rui. Structure matters: Tackling the semantic discrepancy in diffusion models for image inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 34, 42" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.594, + 0.871, + 0.621 + ], + "angle": 0, + "content": "[67] Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.624, + 0.871, + 0.662 + ], + "angle": 0, + "content": "[68] Minghua Liu, Chao Xu, Haian Jin, Linghao Chen, Mukund Varma T, Zexiang Xu, and Hao Su. One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization. Advances in Neural Information Processing Systems, 2023. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.666, + 0.871, + 0.705 + ], + "angle": 0, + "content": "[69] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proceedings of the IEEE/CVF international conference on computer vision, 2023. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.709, + 0.871, + 0.749 + ], + "angle": 0, + "content": "[70] Xiaoxiao Long, Yuan-Chen Guo, Cheng Lin, Yuan Liu, Zhiyang Dou, Lingjie Liu, Yuexin Ma, Song-Hai Zhang, Marc Habermann, Christian Theobalt, et al. Wonder3d: Single image to 3d using cross-domain diffusion. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2024. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.752, + 0.871, + 0.779 + ], + "angle": 0, + "content": "[71] Aaron Lou, Chenlin Meng, and Stefano Ermon. Discrete diffusion modeling by estimating the ratios of the data distribution. arXiv preprint arXiv:2310.16834, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.782, + 0.871, + 0.822 + ], + "angle": 0, + "content": "[72] Yiyang Ma, Xingchao Liu, Xiaokang Chen, Wen Liu, Chengyue Wu, Zhiyu Wu, Zizheng Pan, Zhenda Xie, Haowei Zhang, Liang Zhao, et al. Janusflow: Harmonizing autoregression and rectified flow for unified multimodal understanding and generation. arXiv preprint arXiv:2411.07975, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.825, + 0.871, + 0.853 + ], + "angle": 0, + "content": "[73] Chenlin Meng, Kristy Choi, Jiaming Song, and Stefano Ermon. Concrete score matching: Generalized score matching for discrete data. Advances in Neural Information Processing Systems, 35:34532-34545, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.855, + 0.871, + 0.894 + ], + "angle": 0, + "content": "[74] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.898, + 0.742, + 0.913 + ], + "angle": 0, + "content": "[75] Midjourney. Midjourney. https://www.midjourney.com, 2024. 
2, 6, 7, 18, 19, 20, 59, 60, 61" + }, + { + "type": "list", + "bbox": [ + 0.136, + 0.092, + 0.871, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "88" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.136, + 0.092, + 0.871, + 0.12 + ], + "angle": 0, + "content": "[76] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 2021. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.137, + 0.122, + 0.872, + 0.163 + ], + "angle": 0, + "content": "[77] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2020. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.165, + 0.749, + 0.18 + ], + "angle": 0, + "content": "[78] OpenAI. Addendum to gpt-4o system card: 4o image generation, 2025. Accessed: 2025-04-02. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.184, + 0.871, + 0.222 + ], + "angle": 0, + "content": "[79] Junyi Pan, Xiaoguang Han, Weikai Chen, Jiapeng Tang, and Kui Jia. Deep mesh reconstruction from single rgb images via topology modification networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.226, + 0.871, + 0.266 + ], + "angle": 0, + "content": "[80] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3d hands, face, and body from a single image. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.269, + 0.871, + 0.308 + ], + "angle": 0, + "content": "[81] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.312, + 0.871, + 0.352 + ], + "angle": 0, + "content": "[82] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: Improving latent diffusion models for high-resolution image synthesis. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 1, 47, 50" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.355, + 0.871, + 0.395 + ], + "angle": 0, + "content": "[83] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3d object generation using both 2d and 3d diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.398, + 0.871, + 0.425 + ], + "angle": 0, + "content": "[84] Chu-Jie Qin, Rui-Qi Wu, Zikun Liu, Xin Lin, Chun-Le Guo, Hyun Hee Park, and Chongyi Li. Restore anything with masks: Leveraging mask image modeling for blind all-in-one image restoration. In ECCV, 2024. 
34" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.428, + 0.871, + 0.455 + ], + "angle": 0, + "content": "[85] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.459, + 0.871, + 0.497 + ], + "angle": 0, + "content": "[86] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. SAM 2: Segment anything in images and videos. *ICLR*, 2025. 80, 81, 82, 83, 84" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.502, + 0.871, + 0.528 + ], + "angle": 0, + "content": "[87] Jeremy Reizenstein, Roman Shapovalov, Philipp Henzler, Luca Sbordone, Patrick Labatut, and David Novotny. Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In ICCV, 2021. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.532, + 0.871, + 0.559 + ], + "angle": 0, + "content": "[88] Bin Ren, Yawei Li, Nancy Mehta, and Radu Timofte. The ninth nitire 2024 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.562, + 0.871, + 0.602 + ], + "angle": 0, + "content": "[89] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 10684-10695, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.606, + 0.871, + 0.645 + ], + "angle": 0, + "content": "[90] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, June 2022. 47, 52" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.648, + 0.871, + 0.675 + ], + "angle": 0, + "content": "[91] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023. 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.679, + 0.871, + 0.718 + ], + "angle": 0, + "content": "[92] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.722, + 0.871, + 0.761 + ], + "angle": 0, + "content": "[93] Subham Sahoo, Marianne Arriola, Yair Schiff, Aaron Gokaslan, Edgar Marroquin, Justin Chiu, Alexander Rush, and Volodymyr Kuleshov. Simple and effective masked diffusion language models. Advances in Neural Information Processing Systems, 37:130136-130184, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.764, + 0.871, + 0.791 + ], + "angle": 0, + "content": "[94] Qingyu Shi, Lu Qi, Jianzong Wu, Jinbin Bai, Jingbo Wang, Yunhai Tong, Xiangtai Li, and Ming-Husan Yang. Relation- booth: Towards relation-aware customized object generation. arXiv preprint arXiv:2410.23280, 2024. 
28" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.795, + 0.871, + 0.834 + ], + "angle": 0, + "content": "[95] Haoze Sun, Wenbo Li, Jianzhuang Liu, Haoyu Chen, Renjing Pei, Xueyi Zou, Youliang Yan, and Yujiu Yang. Coser: Bridging image and language for cognitive super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 25868-25878, 2024. 34" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.838, + 0.871, + 0.865 + ], + "angle": 0, + "content": "[96] Quan Sun, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, Yueze Wang, Hongcheng Gao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative pretraining in multimodality. arXiv preprint arXiv:2307.05222, 2023.1" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.868, + 0.871, + 0.895 + ], + "angle": 0, + "content": "[97] Alexander Swerdlow, Mihir Prabhudesai, Siddharth Gandhi, Deepak Pathak, and Katerina Fragkiadaki. Unified multimodal discrete diffusion. arXiv preprint arXiv:2503.20853, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.898, + 0.871, + 0.913 + ], + "angle": 0, + "content": "[98] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 1" + }, + { + "type": "list", + "bbox": [ + 0.136, + 0.092, + 0.872, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "89" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.136, + 0.092, + 0.871, + 0.156 + ], + "angle": 0, + "content": "[99] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 2, 3, 5, 6, 7, 8, 9, 10, 12, 14, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 59, 60, 61, 63, 64, 65, 67, 68, 70, 71, 72, 73, 75, 76, 77, 78, 79, 81, 82, 83, 84" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.161, + 0.871, + 0.202 + ], + "angle": 0, + "content": "[100] Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.204, + 0.871, + 0.232 + ], + "angle": 0, + "content": "[101] Chunwei Wang, Guansong Lu, Junwei Yang, Runhui Huang, Jianhua Han, Lu Hou, Wei Zhang, and Hang Xu. Illumine: Illuminating your llms to see, draw, and self-enhance. arXiv preprint arXiv:2412.06673, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.234, + 0.871, + 0.262 + ], + "angle": 0, + "content": "[102] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In Proceedings of the European conference on computer vision (ECCV), 2018. 58" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.265, + 0.871, + 0.293 + ], + "angle": 0, + "content": "[103] Xierui Wang, Siming Fu, Qihan Huang, Wanggui He, and Hao Jiang. Ms-diffusion: Multi-subject zero-shot image personalization with layout guidance. arXiv preprint arXiv:2406.07209, 2024. 
28, 30" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.296, + 0.871, + 0.325 + ], + "angle": 0, + "content": "[104] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.328, + 0.871, + 0.355 + ], + "angle": 0, + "content": "[105] Zhendong Wang, Yifan Jiang, Yadong Lu, Pengcheng He, Weizhu Chen, Zhangyang Wang, Mingyuan Zhou, et al. In-context learning unlocked for diffusion models. NeurIPS, 2023. 56" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.359, + 0.871, + 0.386 + ], + "angle": 0, + "content": "[106] Zhenyu Wang, Aoxue Li, Zhenguo Li, and Xihui Liu. Genartist: Multimodal llm as an agent for unified image generation and editing. NeurIPS, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.389, + 0.871, + 0.417 + ], + "angle": 0, + "content": "[107] Alex Warren, Ke Xu, Jiaying Lin, Gary KL Tam, and Rynson WH Lau. Effective video mirror detection with inconsistent motion cues. In CVPR, 2024. 69, 71" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.421, + 0.871, + 0.449 + ], + "angle": 0, + "content": "[108] Jianzong Wu, Chao Tang, Jingbo Wang, Yanhong Zeng, Xiangtai Li, and Yunhai Tong. Diffensei: Bridging multi-modal lms and diffusion models for customized manga generation. CVPR, 2025. 31, 33" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.451, + 0.871, + 0.491 + ], + "angle": 0, + "content": "[109] Size Wu, Wenwei Zhang, Lumin Xu, Sheng Jin, Zhonghua Wu, Qingyi Tao, Wentao Liu, Wei Li, and Chen Change Loy. Harmonizing visual representations for unified multimodal understanding and generation. arXiv preprint arXiv:2503.21979, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.495, + 0.871, + 0.535 + ], + "angle": 0, + "content": "[110] Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. arXiv preprint arXiv:2409.04429, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.538, + 0.871, + 0.566 + ], + "angle": 0, + "content": "[111] Yifan Xia, Yuying Ge, Jing Zhang, Yuchao Dai, and Ming-Ming Cheng. Seed-story: Multimodal long story generation with large language model. arXiv preprint arXiv:2407.08683, 2024. 31, 32" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.569, + 0.871, + 0.597 + ], + "angle": 0, + "content": "[112] Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and Zheng Liu. Omnigen: Unified image generation. arXiv preprint arXiv:2409.11340, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.6, + 0.871, + 0.639 + ], + "angle": 0, + "content": "[113] Jiale Xu, Weihao Cheng, Yiming Gao, Xintao Wang, Shenghua Gao, and Ying Shan. Instantmesh: Efficient 3d mesh generation from a single image with sparse-view large reconstruction models. arXiv preprint arXiv:2404.07191, 2024.58" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.643, + 0.871, + 0.683 + ], + "angle": 0, + "content": "[114] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10371-10381, 2024. 
74, 75" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.687, + 0.871, + 0.715 + ], + "angle": 0, + "content": "[115] Ling Yang, Zhaochen Yu, Chenlin Meng, Minkai Xu, Stefano Ermon, and Bin Cui. Mastering text-to-image diffusion: Recaptioning, planning, and generating with multimodal llms. In ICML, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.718, + 0.854, + 0.734 + ], + "angle": 0, + "content": "[116] Hang Yu, Ruilin Li, Shaorong Xie, and Jiayan Qiu. Shadow-eligible image outpainting. In CVPR, 2024. 34, 42" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.737, + 0.871, + 0.775 + ], + "angle": 0, + "content": "[117] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv, 2025. 62, 63" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.78, + 0.871, + 0.808 + ], + "angle": 0, + "content": "[118] Yu Yuan, Xijun Wang, Yichen Sheng, Prateek Chennuri, Xingguang Zhang, and Stanley Chan. Generative photography: Scene-consistent camera control for realistic text-to-image synthesis. arXiv preprint arXiv:2412.02168, 2024. 53, 54, 55" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.811, + 0.871, + 0.851 + ], + "angle": 0, + "content": "[119] Cheng Zhang, Qianyi Wu, Camilo Cruz Gambardella, Xiaoshui Huang, Dinh Phung, Wanli Ouyang, and Jianfei Cai. Taming stable diffusion for text to \\(360^{\\circ}\\) panorama image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 17" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.854, + 0.871, + 0.882 + ], + "angle": 0, + "content": "[120] Kai Zhang, Lingbo Mo, Wenhu Chen, Huan Sun, and Yu Su. Magicbrush: A manually annotated dataset for instruction-guided image editing. In NeurIPS, 2023. 21, 25, 26, 27" + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.885, + 0.871, + 0.911 + ], + "angle": 0, + "content": "[121] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models, 2023. 47" + }, + { + "type": "list", + "bbox": [ + 0.13, + 0.092, + 0.871, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "90" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.092, + 0.871, + 0.119 + ], + "angle": 0, + "content": "[122] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Scaling in-the-wild training for diffusion-based illumination harmonization and editing by imposing consistent light transport. In ICLR, 2025. 34, 46" + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.122, + 0.873, + 0.15 + ], + "angle": 0, + "content": "[123] Wenwei Zhang, Jiangmiao Pang, Kai Chen, and Chen Change Loy. K-net: Towards unified image segmentation. Advances in Neural Information Processing Systems, 34:10326-10338, 2021. 62, 65" + }, + { + "type": "ref_text", + "bbox": [ + 0.128, + 0.153, + 0.871, + 0.192 + ], + "angle": 0, + "content": "[124] Xinchen Zhang, Ling Yang, Guohao Li, Yaqi Cai, Jiake Xie, Yong Tang, Yujiu Yang, Mengdi Wang, and Bin Cui. Itercomp: Iterative composition-aware feedback learning from model gallery for text-to-image generation. 
arXiv preprint arXiv:2410.07171, 2024.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.195, + 0.871, + 0.223 + ], + "angle": 0, + "content": "[125] Yuxuan Zhang, Yiren Song, Jiaming Liu, Rui Wang, Jinpeng Yu, Hao Tang, Huaxia Li, Xu Tang, Yao Hu, Han Pan, et al. Ssr-encoder: Encoding selective subject representation for subject-driven generation. In CVPR, 2024. 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.226, + 0.871, + 0.253 + ], + "angle": 0, + "content": "[126] Chuyang Zhao, Yuxing Song, Wenhao Wang, Haocheng Feng, Errui Ding, Yifan Sun, Xinyan Xiao, and Jingdong Wang. Monofrformer: One transformer for both diffusion and autoregression. arXiv preprint arXiv:2409.16280, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.256, + 0.871, + 0.283 + ], + "angle": 0, + "content": "[127] Peng Zheng, Dehong Gao, Deng-Ping Fan, Li Liu, Jorma Laaksonen, Wanli Ouyang, and Nicu Sebe. Bilateral reference for high-resolution dichotomous image segmentation. CCAI, 2024. 69, 70, 73" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.286, + 0.871, + 0.325 + ], + "angle": 0, + "content": "[128] Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.329, + 0.871, + 0.367 + ], + "angle": 0, + "content": "[129] Donghao Zhou, Jiancheng Huang, Jinbin Bai, Jiaze Wang, Hao Chen, Guangyong Chen, Xiaowei Hu, and Pheng-Ann Heng. MagicTailor: Component-controllable personalization in text-to-image diffusion models. arXiv preprint arXiv:2410.13370, 2024. 28" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.371, + 0.871, + 0.399 + ], + "angle": 0, + "content": "[130] Zhiyu Zhu, Yingcong Chen, Zhenyu Xie, and Jingyi Yu. Disenvisioner: Disentangled and enriched visual prompt for customized image generation. arXiv preprint arXiv:2410.02067, 2024. 28, 29" + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.402, + 0.871, + 0.429 + ], + "angle": 0, + "content": "[131] Silvia Zuffi, Angjoo Kanazawa, and Michael J Black. Lions and tigers and bears: Capturing non-rigid, 3d, articulated shape from images. In CVPR, 2018. 
58" + }, + { + "type": "list", + "bbox": [ + 0.127, + 0.092, + 0.873, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "91" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_origin.pdf b/data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0bd7d16e55f83fb380338af6cb1f30e6565d3c61 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61c14c8ba2a812daeb78252e4afaaf1a686e5f2fc8e6cbf061f98ce175d021c7 +size 45924908 diff --git a/data/2025/2504_05xxx/2504.05979/full.md b/data/2025/2504_05xxx/2504.05979/full.md new file mode 100644 index 0000000000000000000000000000000000000000..fbd0ddde35b0105b5928feab14ebe1791f02ca7c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/full.md @@ -0,0 +1,3734 @@ +# An Empirical Study of GPT-4o Image Generation Capabilities + +Sixiang Chen $^{1*}$ , Jinbin Bai $^{2*}$ , Zhuoran Zhao $^{1*}$ , Tian Ye $^{1*}$ , Qingyu Shi $^{3}$ , Donghao Zhou $^{4}$ , Wenhao Chai $^{5}$ , Xin Lin $^{6}$ , Jianzong Wu $^{3}$ , Chao Tang $^{3}$ , Shilin Xu $^{3}$ , Tao Zhang $^{6}$ , Haobo Yuan $^{6}$ , Yikang Zhou $^{6}$ , Wei Chow $^{2}$ , Linfeng Li $^{2}$ , Xiangtai Li $^{3\dagger}$ , Lei Zhu $^{1,7\dagger}$ , Lu Qi $^{6\dagger}$ + +$^{1}$ The Hong Kong University of Science and Technology (GZ) $^{2}$ National University of Singapore $^{3}$ Peking University $^{4}$ The Chinese University of Hong Kong $^{5}$ University of Washington $^{6}$ Wuhan University $^{7}$ The Hong Kong University of Science and Technology + +# Abstract + +The landscape of image generation has rapidly evolved, from early GAN-based approaches to diffusion models and, most recently, to unified generative architectures that seek to bridge understanding and generation tasks. Recent advances, especially the GPT-4o, have demonstrated the feasibility of high-fidelity multimodal generation, their architectural design remains mysterious and unpublished. This prompts the question of whether image and text generation have already been successfully integrated into a unified framework for those methods. In this work, we conduct an empirical study of GPT-4o's image generation capabilities, benchmarking it against leading open-source and commercial models. Our evaluation covers four main categories, including text-to-image, image-to-image, image-to-3D, and image-to-X generation, with more than 20 tasks. Our analysis highlights the strengths and limitations of GPT-4o under various settings, and situates it within the broader evolution of generative modeling. Through this investigation, we identify promising directions for future unified generative models, emphasizing the role of architectural design and data scaling. For a high-definition version of the PDF, please refer to the link on GitHub: https://github.com/Ephemeral182/Empirical-Study-of-GPT-4o-Image-Gen. + +# 1 Introduction + +Over the past decade, image generation has undergone a remarkable evolution—from the early successes of GANs [35] to the dominance of diffusion models [89, 82, 26], which have significantly advanced image fidelity and diversity [37, 7]. 
In parallel, Large Language Models (LLMs) have achieved exceptional performance across diverse natural language tasks by scaling autoregressive next-token prediction, demonstrating the power of unified modeling principles. These advances naturally raise a compelling question: can such principles be extended to image generation?

However, fundamental differences between autoregressive and diffusion-based paradigms present non-trivial challenges. Autoregressive models excel in sequential text generation, while diffusion models have become the de facto standard for high-quality image synthesis. Bridging these modalities within a unified framework remains an open challenge. Several works [96, 101, 100, 34, 24, 13] attempt to bridge this gap via multimodal connectors or instruction tuning, with LLMs serving as planning modules that produce intermediate representations for image generation. While effective to some extent, these paradigms often exhibit limited interaction between text and image modalities, and struggle with content consistency—particularly in image-to-image generation and complex instruction-based synthesis.

To address these limitations, recent research explores unified generation models that integrate understanding and generation within a single architecture, following three main technical paradigms. The first line of work represents both language and vision as discrete token sequences [67, 98, 110, 104, 19, 65, 109], leveraging VQGAN [28] or similar compressors to tokenize images for compatibility with autoregressive models. A second direction integrates large language models directly into the diffusion process [128, 126, 112, 72], employing them as denoising backbones for image generation and as unified sequence models for text. While promising, these approaches typically rely on intermediate compression modules such as VAEs or VQVAEs, which may limit visual fidelity or increase architectural complexity. A third and increasingly prominent paradigm investigates discrete diffusion frameworks that natively support both image and text generation within a unified modeling space [71, 73, 93]. Building on this insight, recent works [58, 97] propose fully end-to-end diffusion architectures based on shared Transformer backbones, demonstrating competitive performance and seamless modality integration comparable to similarly sized LLMs.

Despite these promising directions, such systems still lag behind the sophistication and generalization capabilities of proprietary models like Flux [51] and Midjourney [75], even though those proprietary systems may themselves lack explicit reasoning capabilities.

The recent release of GPT-4o [78] marks a significant milestone in multimodal generative modeling. As a native multimodal architecture, GPT-4o demonstrates strong capabilities in generating high-fidelity, photorealistic images while seamlessly unifying vision and language generation—reportedly in an autoregressive fashion. However, its closed-source nature—particularly the lack of disclosure about its architecture, training regimen, and inference mechanisms—poses substantial challenges for scientific scrutiny. This motivates a careful empirical assessment of its capabilities relative to open-source state-of-the-art models.

Although the visual performance of GPT-4o and Gemini is widely recognized, much of their success likely stems from unprecedented scale in training data, model parameters, and compute resources.
Prior studies, including diffusion models and connector-based models, suggest that scaling is a key enabler of generative quality—potentially more so than architectural novelty alone. These trends point to a promising trajectory for unified generative models: with sufficient scale, they may rival or even surpass today's best proprietary systems.

In this study, we conduct a comprehensive evaluation of GPT-4o's image generation performance, benchmarking its outputs against leading systems including Gemini 2.0 Flash Experimental [99] and other state-of-the-art models. Building upon our comparative evaluation across text-to-image, image-to-image, image-to-3D, and image-to-X generation tasks, GPT-4o demonstrates several distinctive strengths:

- Exceptional Text Rendering Capability. GPT-4o demonstrates exceptional capability in rendering textual elements within images, maintaining correct spelling, alignment, and formatting even in document-style generation tasks. This level of text fluency is rarely seen in prior models and is crucial for practical applications such as chart generation, document layout synthesis, and instruction-rich visual storytelling.
- Compositional Generalization and Prompt Following. GPT-4o displays impressive compositional abilities, accurately assembling complex scene elements, styles, or attributes described in prompts. This strong prompt adherence enables it to handle fine-grained multi-attribute conditions in generation tasks with minimal loss of semantic detail.
- Spatial Reasoning and Multi-View Consistency. In generation tasks involving spatial manipulation, such as 3D view synthesis, camera control, and depth-conditioned rendering, GPT-4o maintains geometric consistency and viewpoint realism. This indicates an inherent capacity for spatial reasoning and structural awareness, even without explicit 3D modeling modules.
- Comprehensive Image Transformation Capability. GPT-4o shows strong generalization across a wide spectrum of image-to-image tasks, ranging from low-level image restoration to high-level perceptual understanding. Without task-specific tuning, it handles almost all of the diverse transformations we test, such as denoising, deblurring, relighting, segmentation, and depth estimation. This suggests the model has learned robust visual priors and spatial semantics, enabling it to perform both low-level correction and abstract structural prediction under a unified framework.

However, limitations remain in inconsistent generation, hallucination, and data bias for underrepresented cultural elements and non-Latin scripts, highlighting current trade-offs in model design and training data coverage.

While we do not analyze the internal architecture or implementation details of GPT-4o in this paper*, we believe it plays an important role toward unified multimodal generation. We also emphasize that model architecture is only one part of this progress—training data, model scale, and optimization strategies are equally important. We hope future work will provide more empirical evidence to better understand such proprietary systems and their position within this evolving research landscape.

# 2 Evaluation

As GPT-4o's image generation capability has only recently been released and no API is available, we conduct only qualitative comparisons between GPT-4o, Gemini 2.0 Flash [99], and other state-of-the-art models in their respective domains.
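Should programmatic access become available, a comparable side-by-side collection protocol could be scripted. The sketch below shows one minimal way to organize such a loop; the `GenerateFn` callables, model names, and output layout are hypothetical placeholders, not the tooling used for the figures in this paper.

```python
from pathlib import Path
from typing import Callable, Dict

# Hypothetical signature: maps a text prompt to generated image bytes.
GenerateFn = Callable[[str], bytes]

def collect_side_by_side(prompts: Dict[str, str],
                         models: Dict[str, GenerateFn],
                         out_dir: str = "qualitative_cases") -> None:
    """Run every prompt through every model and store outputs per case,
    so panels can later be assembled into side-by-side comparison figures."""
    root = Path(out_dir)
    for case_id, prompt in prompts.items():
        case_dir = root / case_id
        case_dir.mkdir(parents=True, exist_ok=True)
        (case_dir / "prompt.txt").write_text(prompt, encoding="utf-8")
        for model_name, generate in models.items():
            image_bytes = generate(prompt)  # one call per (case, model)
            (case_dir / f"{model_name}.png").write_bytes(image_bytes)

if __name__ == "__main__":
    # A stand-in generator so the sketch runs without any real API access.
    def fake_generate(prompt: str) -> bytes:
        return b""  # a real client would return encoded image bytes here

    collect_side_by_side(
        {"fig1_numeracy": "A yellow bowl, a blue mug and a pink plate on the table."},
        {"gpt-4o": fake_generate, "gemini-2.0-flash": fake_generate},
    )
```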
To systematically compare these models across diverse image generation tasks, including text-to-image generation, image-to-image generation, text/image-to-3D generation, and various image-to-X generation, we conduct a detailed case study. This qualitative analysis provides insight into GPT-4o's strengths and limitations on each task, as shown in Table 1, using the four error categories defined below.

Low Visual Quality: The image synthesis model fails to generate fine-grained object details or produces blurry outputs. Typical cases include distorted human bodies or unrealistic hand shapes.

Inconsistent Generation: The image synthesis model produces outputs or image details that are inconsistent with the input image.

Lack of Knowledge: The image synthesis model lacks domain-specific knowledge, such as particular artistic styles, and thus generates visually plausible but incorrect results.

Failure to Follow Instructions: The image synthesis model misinterprets the input prompt and produces inconsistent results. For example, it may fail to capture specified numbers, colors, or object arrangements.

Table 1: GPT-4o vs. Baselines: Qualitative error analysis across image generation tasks.
| Case Figure | Meta-task | Sub-task | GPT-4o | Gemini 2.0 Flash | Domain-SOTA |
| --- | --- | --- | --- | --- | --- |
| Figure 1 |  |  | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 2 |  | Complex Text Following | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 3 |  |  | Success | Success | Success |
| Figure 4 |  |  | Success | Success | Success |
| Figure 5 |  |  | Success | Success | Success |
| Figure 6 | Text-to-Image | Text Rendering | Success | Low Visual Quality | Low Visual Quality |
| Figure 7 |  |  | Success | Low Visual Quality | Low Visual Quality |
| Figure 8 |  |  | Success | Low Visual Quality | Low Visual Quality |
| Figure 9 |  | Document Generation | Success | Low Visual Quality | Low Visual Quality |
| Figure 10 |  |  | Success | Low Visual Quality | Low Visual Quality |
| Figure 11 |  | Panorama | Lack of Knowledge | Success | Success |
| Figure 12 |  | Style Transfer | Success | Lack of Knowledge | Lack of Knowledge |
| Figure 13 |  |  | Success | Lack of Knowledge | Lack of Knowledge |
| Figure 14 |  |  | Low Visual Quality | Success | Failure to Follow Instructions |
| Figure 15 |  | Image Editing | Failure to Follow Instructions | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 16 |  |  | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 17 |  |  | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 18 |  |  | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 19 |  |  | Success | Inconsistent Generation | Failure to Follow Instructions |
| Figure 20 |  | Single-Concept Customization | Success | Failure to Follow Instructions | Success |
| Figure 21 |  | Multi-Concept Customization | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 22 |  | Story Image Generation | Success | Failure to Follow Instructions | Success |
| Figure 23 |  |  | Success | Inconsistent Generation | Success |
| Figure 24 |  | Low-Level Vision-Denoising | Low Visual Quality | Low Visual Quality | Success |
| Figure 25 |  | Low-Level Vision-Deraining | Success | Inconsistent Generation | Success |
| Figure 26 |  | Low-Level Vision-Dehazing | Success | Low Visual Quality | Success |
| Figure 27 |  | Low-Level Vision-Low Light Enhancement | Low Visual Quality | Low Visual Quality | Success |
| Figure 28 |  | Low-Level Vision-Deblurring | Success | Low Visual Quality | Success |
| Figure 29 |  | Low-Level Vision-Super Resolution | Success | Low Visual Quality | Success |
| Figure 30 |  | Low-Level Vision-Inpainting | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 31 |  | Low-Level Vision-Outpainting | Inconsistent Generation | Success | Success |
| Figure 32 |  | Low-Level Vision-Colorization | Success | Success | Success |
| Figure 33 |  | Low-Level Vision-Shadow Removal | Success | Failure to Follow Instructions | Success |
| Figure 34 |  | Low-Level Vision-Reflection Removal | Inconsistent Generation | Failure to Follow Instructions | Success |
| Figure 35 |  | Low-Level Vision-Relighting | Success | Failure to Follow Instructions | Success |
| Figure 36 |  | Spatial Control-Canny | Inconsistent Generation | Failure to Follow Instructions | Success |
| Figure 37 |  | Spatial Control-Depth | Success | Failure to Follow Instructions | Success |
| Figure 38 |  | Spatial Control-Sketch | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 39 |  | Spatial Control-Pose | Success | Inconsistent Generation | Success |
| Figure 40 |  | Spatial Control-Mask | Inconsistent Generation | Failure to Follow Instructions | Inconsistent Generation |
| Figure 41 |  | Camera Control | Inconsistent Generation | Failure to Follow Instructions | Success |
| Figure 42 |  |  | Failure to Follow Instructions | Failure to Follow Instructions | Success |
| Figure 43 |  | In-Context Visual Prompting | Failure to Follow Instructions | Failure to Follow Instructions | N/A |
| Figure 44 |  | Image to 3D Modeling | Success | Failure to Follow Instructions | Failure to Follow Instructions |
| Figure 45 |  | UV Map to 3D Rendering | Success | Inconsistent Generation | Failure to Follow Instructions |
| Figure 46 |  | Novel View Synthesis | Success | Success | Failure to Follow Instructions |
| Figure 47 |  | Image Segmentation | Failure to Follow Instructions | Failure to Follow Instructions | Success |
| Figure 48 |  |  | Success | Failure to Follow Instructions | Success |
| Figure 49 |  |  | Success | Failure to Follow Instructions | Success |
| Figure 50 |  | Edge Detection | Success | Success | Success |
| Figure 51 |  |  | Success | Failure to Follow Instructions | Success |
| Figure 52 |  |  | Success | Failure to Follow Instructions | Success |
| Figure 53 |  | Salient Object | Success | Failure to Follow Instructions | Success |
| Figure 54 |  |  | Success | Success | Success |
| Figure 55 |  |  | Success | Success | Success |
| Figure 56 |  | Depth Estimation | Success | Failure to Follow Instructions | Success |
| Figure 57 |  | Normal Estimation | Success | Failure to Follow Instructions | Success |
| Figure 58 |  | Layout Detection | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 59 |  | Text Detection | Failure to Follow Instructions | Failure to Follow Instructions | Success |
| Figure 60 |  |  | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 61 |  |  | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 62 |  |  | Inconsistent Generation | Inconsistent Generation | Success |
| Figure 63 |  |  | Inconsistent Generation | Inconsistent Generation | Success |
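Because Table 1 aggregates 63 cases, it can help to tally the outcome labels per model. The helper below is a small illustrative script rather than part of the study's tooling; the `ROWS` list shows three example rows transcribed from Table 1 and can be extended to all cases.

```python
from collections import Counter

# Outcome labels used in Table 1.
OUTCOMES = (
    "Success",
    "Low Visual Quality",
    "Inconsistent Generation",
    "Lack of Knowledge",
    "Failure to Follow Instructions",
    "N/A",
)

# (case, GPT-4o, Gemini 2.0 Flash, Domain-SOTA): example rows from Table 1.
ROWS = [
    ("Figure 1", "Success", "Failure to Follow Instructions", "Failure to Follow Instructions"),
    ("Figure 24", "Low Visual Quality", "Low Visual Quality", "Success"),
    ("Figure 46", "Success", "Success", "Failure to Follow Instructions"),
]

def tally(rows, column):
    """Count how often each outcome label appears in one model column."""
    counts = Counter(row[column] for row in rows)
    return {label: counts.get(label, 0) for label in OUTCOMES}

if __name__ == "__main__":
    for model, column in (("GPT-4o", 1), ("Gemini 2.0 Flash", 2), ("Domain-SOTA", 3)):
        print(f"{model}: {tally(ROWS, column)}")
```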
# 2.1 Text-to-Image Tasks

# 2.1.1 Complex Text Following Capability

Recent progress in text-to-image generation has shown impressive abilities in generating diverse and realistic images based on text prompts. However, accurately composing multiple objects with various attributes and relationships into one scene remains a significant challenge for current text-to-image generative models [92, 85, 8, 81, 6]. In this section, we assess models' ability for compositional text-to-image generation from four perspectives following [41]: attribute binding, numeracy, object relationships, and complex compositions. Attribute binding evaluates whether the model correctly assigns attributes, such as color, shape, and texture, to the appropriate objects. Numeracy evaluates whether the number of generated objects matches the quantities specified in the prompt. Object relationships refer to both spatial (2D/3D) and non-spatial interactions among objects. Complex compositions evaluate the model's ability to handle multiple types of constraints simultaneously, especially given long or detailed prompts.

As shown in Figure 1 row 1, GPT-4o outperforms both Gemini 2.0 Flash and Midjourney in numeracy tasks. While GPT-4o accurately depicts a single plate, Gemini 2.0 and Midjourney render two plates instead. In terms of understanding object relationships, GPT-4o is the only model that correctly infers the action "walk towards" from the ragdoll to the labrador. However, GPT-4o struggles with more complex terms like "pentagonal pyramid", failing to interpret it correctly (see Figure 1 row 4). This suggests that GPT-4o may have difficulty accurately interpreting objects with unusual geometries. When it comes to abstract prompts, GPT-4o also appears to lack imagination (see Figure 2 row 2), whereas Midjourney v6.1 demonstrates better creativity in this case, outperforming both GPT-4o and Gemini 2.0 Flash.

For complex text-to-image generation, we compare GPT-4o with Gemini 2.0 Flash [99] and FLUX.1-Pro [51], using text prompts collected from [124, 106, 115]. As shown in Figure 3, both GPT-4o and FLUX excel at generating realistic and harmonious scenes that align with the text prompts. However, we observe that GPT-4o shows limitations in generating culturally specific elements. For example, the generated crown for the Chinese general is Western-style rather than Chinese-style (see Figure 4 row 2). Additionally, in large scene generation, GPT-4o struggles to maintain boundary continuity, whereas FLUX produces a more natural composition (see Figure 4 row 3).

Overall, we conclude that GPT-4o excels at text-to-image generation in terms of attribute binding, generative numeracy, object relationships, and complex compositions. However, it exhibits limitations in generating uncommon objects and culturally specific elements, and in maintaining continuity when composing large scenes.

# Text-to-Image Generation

![](images/5e416a9762e2def779608eb1bf6eee5a9cee49745a628b45ccc17073ed461705.jpg)

# Evaluation: Visual content precisely following the text instruction.
![](images/b56cffa92dcc8965f109102c1a0975dedfc665539ea8cf6c71d5a2879dfe8dcc.jpg)

![](images/e6d59fa9484b3877034764c9fa8752e51c78a7b4eba2a3dd224cdb9236f23368.jpg)

![](images/114bf170640575674f20a5f89dd3e1c35830102990e7d45e9fe0e42bf870b7d0.jpg)

![](images/e21293e1dde8f2f8a8736c0a4d673afa08874092081ea4b1ec07788bbe177b05.jpg)

![](images/cde20841ace4621a197135b25fda935e422854fc542a786ca15f9aea2eb46486.jpg)
Input Text: "A yellow bowl, a blue mug and a pink plate on the table."
Input Text: "A ragdoll walks towards a labrador."

![](images/d0a18282d6404ef0ab645148e73d17fe445982eb8c40cb3c0d5a9081efd6d52c.jpg)

![](images/3bffa6f0cb5545336d1dece3985a3fcaff2ad60da5c231db47b90005c63f4ceb.jpg)
Input Text: "Three differently colored apples (yellow, green, red from left to right) with a Coca-Cola bottle placed behind the middle apple."

![](images/f333d581ad570f753e99a7e73c99d827c89d746ae6f1f549c53cf30cf7e7ea33.jpg)

![](images/107a161e8c64a21e6ae887d967ee6aa6bf1c940aafacc290e429d8deb9516754.jpg)

![](images/00b435bd1d8026aa4f0d08be8a9579479f625012cae88f49f81a5d958cf5c199.jpg)
Input Text: "The oval sphere was nestled between the rectangular prism and the pentagonal pyramid."
Figure 1: Task: Compositional text-to-image generation. Evaluate the image-text alignment on attribute binding, numeracy, and object relationship. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o outperforms Gemini 2.0 Flash and Midjourney v6.1 across all aspects. However, GPT-4o struggles with uncommon objects that have unusual geometries.

![](images/b1e74b98b6b45c314db43ad44efec736de0526ea53933c41d6f494dfc222ec5c.jpg)

![](images/ec488303c09dbf9dfb728fd1952368c1c3fa85f708e3b290ecd1a36b50ef4803.jpg)

GPT-4o

Gemini 2.0 Flash

Midjourney v6.1

![](images/41cf37136782a8224523753684cab11c0ddb36c12dfd39ee89ac851285853af2.jpg)

# Evaluation: Visual content precisely following the text instruction.

![](images/742bdab0de5d04587ee79a5c9d93f90cfb4494e5a48dcf258a1406c6265bd627.jpg)
Input Text: "The round, juicy watermelon sat in the cool, refreshing bowl of ice, waiting to be sliced open and devoured."

![](images/c73e3aa340f33cab4cf19354249e37fe1bd7f76aef6ec7fbcc154198f1dad05b.jpg)

![](images/382030f3e3f0298bca0a14e5cda55ee5859de8f05e70a6182873cc5b53987e77.jpg)

![](images/9a87f98dd86139de0d38246d54b1d070591e2fdb526d3285409a473583f28ec6.jpg)

![](images/fce6b55e09781aa60141751625be4a55c7d0fd1eef584be53a6585c781eade33.jpg)

![](images/0f7ad2208e211f067756484bb7a6847038cf13549a2f34c8a13b12eeda49aad7.jpg)

![](images/d87f5d38548a498be4acc2e6175a859f019a900cdcbd8394bd2700b4f145ed8f.jpg)
Input Text: "The bold, expressive strokes of the artist's brush brought the blank canvas to life, forming a vibrant and dynamic masterpiece."

![](images/c87b7b6792c5fda9d5340959a376273a7f5d0b80db6770aea6ec9e15c6ec596d.jpg)

![](images/4d7cf29e3244126ed31062342f32c73674817be1d0079f2da561d4e75f3368e2.jpg)

![](images/c36a7bcd11820fd2f7d3c63fbf0e563dbdebdbcf2828507130fec8b86c2c1cc1.jpg)
Input Text: "The heavy raindrops fell on the smooth glass and the textured roof."

![](images/0e3a0dc448dd3f289f1837b24241f0ae490d1d79799115f52e41475efc038437.jpg)
Input Text: "The gentle, soothing melody of the piano filled the concert hall, as the pianist's fingers danced over the keys."
Figure 2: Task: Compositional text-to-image generation.
Evaluate the image-text alignment on attribute binding and complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o outperforms the other two models in generating objects accurately aligned with the text prompts. But for more abstract and creative tasks, Midjourney v6.1 performs the best.

![](images/adcb856bb8a351cb62cc8d6fd9dcfe0f715a542653534f39e99b1973a7067ec8.jpg)

GPT-4o

Gemini 2.0 Flash

Midjourney v6.1

# Text-to-Image Generation (with complex text prompt)

# Evaluation: Visual content precisely following the text instruction.

![](images/27fce9400300fb184ed43e1e4b4f8eb042ae16f63646b6eda324b41fff54aede.jpg)
Input Text: "An icy landscape. A vast expanse of snow-covered mountain peaks stretches endlessly. Beneath them is a dense forest and a colossal frozen lake. Three people are boating in three boats separately in the lake. Not far from the lake, a volcano threatens eruption, its rumblings felt even from afar. Above, a ferocious red dragon dominates the sky and commands the heavens, fueled by the volcano's relentless energy flow." (Prompt from GenArtist)

![](images/116d28e49cd90395347ff38e9ec8e1c04b768757b89b7a9a1d0a0f0f317b20f4.jpg)

![](images/bfaf1c18b294719962fa4fcaa9024d109045581572e81ebad1ebe756236b696a.jpg)

![](images/64e6d603fb4bb84b5ebce439ab2fdb3c6e0e566e8af8982c1c50d5378b6fe487.jpg)

![](images/7ed09698c0927be9038e1132fc7243c7e628f5e5c041880670c7d1ac190273ad.jpg)

![](images/484e31e49c1e7a2ebd3c367ae0db8fcd10716584cec8c8e06beaca0f6c7a0381.jpg)

![](images/2de6dd439640e1a491cd2669646ca4deab67272994343185f41956a82e6a40a1.jpg)
Input Text: "In a magical seascape, a majestic ship sails through crystal blue waters surrounded by vibrant marine life and soaring birds. Towering cliffs frame the scene, while a stunning rainbow arches across the sky, blending with ethereal clouds. This enchanting journey captures the serene beauty of nature's wonders." (Prompt from IterComp)

![](images/0c0470a214490980a8b144c3e88a00976a14f32edb1762c91f3846a0c05ec5b2.jpg)
Input Text: "On the rooftop of a skyscraper in a bustling cyberpunk city, a figure in a trench coat and neon-lit visor stands amidst a garden of bio-luminescent plants, overlooking the maze of flying cars and towering holograms. Robotic birds flit among the foliage, digital billboards flash advertisements in the distance." (Prompt from IterComp)

![](images/745ba50ac275ead363177aa6a6389523a01b4c236062fe89cb966d79aeafe69b.jpg)
Figure 3: Task: Compositional text-to-image generation. Evaluate the image-text alignment on complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Pro [51]. Observation: GPT-4o and FLUX generate more harmonious and natural scenes than Gemini 2.0 Flash.

GPT-4o

Gemini 2.0 Flash

FLUX

# Text-to-Image Generation (with complex text prompt)

![](images/30ed799a330abd36ec3b456792e50674b62123dda7d183a4909f198292186c06.jpg)

# Evaluation: Visual content precisely following the text instruction.

![](images/0a19bafce54aa9b28620506b9c6051449ff283c024b7b8bdde3dea6959a5f2ab.jpg)
Input Text: "Under the luminous full moon, a serene Japanese garden with traditional pagodas and a tranquil pond creates a magical night scene. The soft glow from the lantern-lit buildings reflects on the water, blending nature and architecture in harmony.
The moonlight bathes the landscape, enhancing the peaceful ambiance." (Prompt from IterComp)

![](images/1660f44ab6725c952d5b0bc43505f77c0d7cb4b552be04d6430ee2b25ec63d61.jpg)

![](images/89e41678c8fcc68e299a5cf2931d67bb1f2339142bca0c32c1593b4888650519.jpg)

![](images/f016fb55ce2f2b9f48a4d23f36970310500e1250abf50d5e88f1902507f424c6.jpg)
Input Text: "A Chinese general wearing a crown, with whiskers and golden Chinese style armor, standing with a majestic dragon head on his chest, symbolizing his strength, wearing black and gold boots. His appearance exudes a sense of authority, wisdom, and an unyielding spirit, embodying the ideal ancient Chinese hero." (Prompt from RPG)

![](images/9077e20f899c60d5e1c26e1748dfa141980bf09da6c613467d42c1143bab34a1.jpg)

![](images/c341891a5dd74ee440ca81c75da11adba9b78fd29a19903df5c98dbac7513e19.jpg)

![](images/4569221e0b0de61258bfb9ba425fbed4cda37201c120d48edf92be4e3bff1d5c.jpg)
Input Text: "A beautiful landscape with a river in the middle, the left of the river is in the evening and in the winter with a big iceberg and a small village while some people are skiing on the river and some people are skating, the right of the river is in the summer with a volcano in the morning and a small village while some people are playing." (Prompt from RPG)

![](images/475d909f8bd94a4e962a1af7f117f1db9f76e1f2cd7047ccbdf158cec0cbc2b2.jpg)
Figure 4: Task: Compositional text-to-image generation. Evaluate the image-text alignment on complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Pro [51]. Observation: GPT-4o struggles to generate culturally specific elements and maintain boundary continuity (see rows 2 and 3), similar to Gemini 2.0 Flash and FLUX.

![](images/ef1d1bd4564e7e63c547c011879fd964bcadd7553b251ad9931c404cc35e496b.jpg)

![](images/1086a817de2fda2fa73bc2221b337c167aa823711f8d87db081972a8c108577d.jpg)

GPT-4o

Gemini 2.0 Flash

FLUX

# 2.1.2 Text Rendering

Text rendering aims at generating text (characters, sentences, or even paragraphs) on an image, where the text content is usually guided by the input prompt. Previous models [27, 2] show good capability in generating short text (within 10 words, such as signs or short phrases), but their ability to generate long texts remains limited.

As shown in Figure 5, GPT-4o demonstrates comparable abilities to existing state-of-the-art (SOTA) baselines when generating short texts. All methods except FLUX [51] perform well at rendering short text following the prompt. In this section, we primarily focus on long text rendering to examine whether GPT-4o can surpass these baselines for extended textual content.

We choose POSTA [12], Gemini 2.0 Flash [99], Ideogram 3.0 [2], and Playground-v3 [64] as the baselines because of their established capabilities in rendering longer texts. The results are shown in Figure 6 and Figure 7.

From these examples, we make the following key observations:

- GPT-4o's strength in long text generation: Compared with other baselines, GPT-4o demonstrates a superior ability to generate long, coherent text. In examples 1 and 3, GPT-4o produces detailed textual information with fewer than three characters generated incorrectly across more than 100 characters of text.
- Baseline limitations: When the input prompt becomes extremely long, models such as Gemini 2.0 Flash, Ideogram 3.0, and Playground-v3 often produce significantly more errors or render vague text patches that are difficult to recognize.
- POSTA's performance: As a model specifically designed for poster-style text generation, POSTA performs closely to, or in some instances slightly more precisely than, GPT-4o. We hypothesize this is due to its multi-step pipeline tailored for long text rendering.

Overall, we conclude that GPT-4o excels at long text rendering, clearly outperforming most existing commercial models and delivering results on par with the latest specialized research models (a character error rate sketch for making such character-level counts precise is given at the end of Section 2.1.3).

# Short Text Rendering

![](images/9d7e40286ddeb28b93be6916d8101d062aa91f512e1c1ac6b0ccc889ae647631.jpg)

# Evaluation: Text Rendering Precision.

![](images/08d9d7d0f4498fa74ab30be14fe461e1fd36819539ce56b0a1505126518de149.jpg)

![](images/234feaba9c9e1d0f65d67e6df566f551271d9b6bc16b92cc18990362a5b21b2f.jpg)

![](images/0ce5a44bd3cd7500b4b78db75c0f1ccc81baa684ffa77ebdb728c28b0fe9e785.jpg)

![](images/204fddddcbcd5e6532fa8831a9afe9f12c4bf879dee56a592fddb35d74766418.jpg)

Input Text: "A beautiful painting of flowing colors and styles forming the words 'The GPT-4o/Ideogram/FLUX/SD3 research paper is nowhere!'. the background is speckled with drops and splashes of paint."

![](images/5d4b6580226201ab5042c29511d6da5a3409b9f5c7fee1d2d9c074d14f64765b.jpg)

![](images/6ec2422a775f3f017131805ebc131bf30d8d32953e433ab7a7e35ce013d814bd.jpg)

![](images/4f7ea6846a9f2e71ede47f66f8045aa234b77d2155c2f56eca68f95514f68e80.jpg)

![](images/1b62b31f138cbdfe4ceec53b959cee36fc3c5f0c22524ab44623a5cd565ac9c8.jpg)

Input Text: "Beautiful pixel art of a Wizard with hovering text 'Achievement unlocked: Diffusion models can spell now'."

![](images/4ba3110052c7723a701e2ccf5e847ad8c21956fcb34a978c1e55cce340d0131c.jpg)
Figure 5: Task: Short text rendering. Generate prompt-aligned, concise textual content (typically within 10 words) on an image. Setup: Each sample is produced based on a guiding text prompt. Comparisons are made with prior SOTA models [27, 2] and FLUX [51]. Observations: GPT-4o achieves performance on par with existing SOTA baselines in rendering short texts, consistently following the prompt with minimal errors. All evaluated methods—except FLUX [51]—deliver high-fidelity results in this setting.

![](images/4fed5bdfe6b3d24177ff8f96422315e81b3ae864616ce0f21770566bf7fd8891.jpg)

![](images/83fabbd535e803f344bff99d3d53a6c8a71d95998a6a377d7bae9a19ebb0830a.jpg)

![](images/63a0fda286dbcf6aab1f70694ab0576b25d5fd247827bdfa027aca3ce2984b08.jpg)

Input Text: "A monkey holding a sign reading 'Scaling transformer models is awesome'."

![](images/b44926116e89a328041a0572f04bcac45c5be77916af431b03c4a0ab99dd761f.jpg)

![](images/cb9fbff8e79609bd0c0397e9ff8bdd26c4934e0c043bf90309793e247c53acde.jpg)

![](images/ecba2242bac62c8676019c58e03ec1fc8ba9f66e7431827caf98cf3565ae7429.jpg)

![](images/eb143f4361a33ce174515f230c11a216ac7590d56a2fc328a2adc862245f0ce7.jpg)

Input Text: "A surreal and humorous scene in a classroom with the words 'GPUs go brrrrr' written in white chalk on a blackboard. In front of the blackboard."

GPT-4o

Ideogram 3.0

FLUX

SD3

# Evaluation: Text Rendering Precision.
![](images/afce16d9cf439bcdc04fa94f8ca4f2227195b2cf36f175b299fa1b98664ddbfa.jpg)
GPT-4o

![](images/c6b354d42faa888d692b35bdfbc5cd4599018152cf0064da6cd13e513960b86e.jpg)
POSTA

![](images/f4cd9c8e039a64fce1592ca2bb40e8f9f9bf8f8c530f65984c76ff6708a91c6f.jpg)
Gemini 2.0 Flash

![](images/c5582fe68220a5a384efa294ef35989ebef4760bd5b2897bd0ee23ceffe344a0.jpg)
Ideogram 3.0

# Input Text:

"Generate a movie poster with a sci-fi space theme, a solitary figure standing on an alien planet, facing a massive outpost.

The poster displays the following text:

Title: The Last Outpost

Subtitle: When the stars fall, the truth rises

# Information:

Produced by Jackson Ward

Music by Aria Calloway

Screenplay by Elena Sharpe

Directed By Sylvia Hartman

"A visually stunning and narratively gripping exploration of the unknown. The Last Outpost masterfully blends elements of science fiction, mystery, and psychological thriller, creating a hauntingly atmospheric journey that will leave audiences on the edge of their seats." -- Global Film Review".

![](images/e1dc82e6233835aef84e7c0d62fd27535b3b4aee7f6bc4beab765dacdbbdc4c1.jpg)
GPT-4o

![](images/59f2bdbb4c18ff5b2eaf1af8462de2d557b98afb5403da1a76ec1f639e6017a6.jpg)
POSTA

![](images/3c2edc361f25ec06384618fcf44a118efef67ed2c14c3a957eec1c7c432abf66.jpg)
Gemini 2.0 Flash

![](images/1751faef203209061a6fae572da8f65fff10f9f74ef783748441d19f7f4a2be9.jpg)
Ideogram 3.0
Figure 6: Task: Long text rendering. Generate extended, coherent, and prompt-consistent textual content on an image. Setup: Evaluations are conducted against advanced baselines including POSTA [12], Gemini 2.0 Flash [99], Ideogram 3.0 [2], and Playground-v3 [64]. Observations: GPT-4o excels in long text rendering by producing coherent, detailed textual information with very few character errors. In contrast, models like Gemini 2.0 Flash, Ideogram 3.0, and Playground-v3 often exhibit increased errors or generate vague text when faced with lengthy prompts, while POSTA's tailored multi-step pipeline sometimes yields competitive precision. Overall, GPT-4o outperforms most commercial models and rivals specialized research approaches in extended text generation.

# Input Text:

"Create a poster with the theme of a Journey of Solitude. The background should depict a lone figure walking toward an unusable form of transportation. The scene should evoke a sense of being lost, helplessness, and desolation, capturing the emotional weight of losing oneself in a barren, unforgiving landscape.

Title: Solitary Journeys

Subtitle: Elara Voss

Information: WANDERING THROUGH THE UNKNOWN".

# Long Text Rendering

![](images/072dabf79d619daa8eeb4cd2f8867859c2a5525ca4ceff1ee839637ee46f829f.jpg)

# Evaluation: Text Rendering Precision.

![](images/37c0d9d0ba1eed41834771a2d8df3690d2b37294a08a5844702161e53aa817ab.jpg)
GPT-4o
Figure 7: Task: Long text rendering. The Setup and Observations are the same as Figure 6.

![](images/61b9d5a9c147c446387301c4f13acd7ceeccc06f653954410c1dc398d1fd4bc3.jpg)
POSTA

![](images/96c1a55c7c5f312a71c89d34f708b0e4f8da4556922ffdee41d673b76ea82dad.jpg)
Gemini 2.0 Flash

![](images/faf4eca5a3720dff227ce6a4ad8f4898de848e4e08fec90adf87952d14446f7e.jpg)
Playground-v3

# Input Text:

"Please generate an artistic and stylized promotional poster. The style is an artistic painting style. The theme is about nature and city.
The poster displays the following information:

Title: Fragmented Harmony

Subtitle: Between the steel and sky, life finds its way.

Information: Amid the towering structures and the quiet persistence of nature, a delicate balance emerges. The complex and often contradictory relationship between urban development and the natural world reveals itself in fleeting moments of harmony. Though fragmented, life continues, threading its way through the shadows of progress. Here, conflict and coexistence form an intricate dance--sometimes at odds, sometimes in unexpected unity".

# 2.1.3 Document Generation

We also explore a novel task: document image generation with GPT-4o, comparing its performance with Gemini 2.0 Flash [99] and Playground-v3 [64]. As shown in Figures 8-10, GPT-4o produces document images with cleaner layouts and more consistent content.

# Document Image Generation

![](images/7ff25ac80d044220739d61284585a070ee9142eede21fe8dbf47dff3cb2ffa9c.jpg)

# Evaluation: Text Rendering Precision.

(Figure 8 panels, transcribed text omitted: the GPT-4o panel reproduces the requested "Attention Is All You Need" title, author list, and abstract nearly verbatim, with only a few character-level slips, while the Gemini 2.0 Flash and Playground-v3 panels degrade into heavily garbled, largely unreadable text.)

GPT-4o

Gemini 2.0 Flash

Playground-v3

# Input Text:

"Generate A realistic screenshot of the first page of the Paper from the following information:

Title: Attention Is All You Need

Author List: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin

Abstract: The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data."

Figure 8: Task: Document image generation. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Playground-v3 [64]. Observation: GPT-4o generates more consistent and accurate fonts and formatting than the other two models.

# Document Image Generation

![](images/e960d89a9332326f5fac58239c1d5784d9e237d7ccede93ea1c82462b3f589b3.jpg)

# Evaluation: Text Rendering Precision.

(Figure 9 panels, transcribed text omitted: the GPT-4o panel reproduces the BERT title, authors, and abstract with only minor punctuation and spelling slips, while the Gemini 2.0 Flash and Playground-v3 panels again collapse into heavily garbled text.)

GPT-4o

Gemini 2.0 Flash

Playground-v3

# Input Text:

Generate A realistic screenshot of the first page of the Paper from the following information:

Title: BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding

Author List: Jacob Devlin, Ming-Wei Chang, Kenton Lee, Kristina Toutanova

Abstract: We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.

BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to $80.5\%$ (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).

Figure 9: Task: Document image generation. The Setup and Observations are the same as Fig. 8.

![](images/9c2265499ec9f6a0c6e61a8914311c8b94d20424cd7a53a126ce5f30f3f6bf33.jpg)

# Evaluation: Text Rendering Precision.

(Figure 10 panels, transcribed text omitted: the GPT-4o panel reproduces the YOLO title, authors, and abstract almost verbatim; the Gemini 2.0 Flash panel and large parts of the Playground-v3 panel are heavily garbled, including an unreadable table and code-like fragments.)

GPT-4o

Gemini 2.0 Flash

Playground-v3

# Input Text:

"Generate A realistic screenshot of the first page of the Paper from the following information:

Title: You Only Look Once: Unified, Real-Time Object Detection

Author List: Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi

Abstract: We present YOLO, a new approach to object detection. Prior work on object detection repurposes classifiers to perform detection. Instead, we frame object detection as a regression problem to spatially separated bounding boxes and associated class probabilities. A single neural network predicts bounding boxes and class probabilities directly from full images in one evaluation. Since the whole detection pipeline is a single network, it can be optimized end-to-end directly on detection performance.

Our unified architecture is extremely fast. Our base YOLO model processes images in real-time at 45 frames per second. A smaller version of the network, Fast YOLO, processes an astounding 155 frames per second while still achieving double the mAP of other real-time detectors. Compared to state-of-the-art detection systems, YOLO makes more localization errors but is far less likely to predict false detections where nothing exists. Finally, YOLO learns very general representations of objects. It outperforms all other detection methods, including DPM and R-CNN, by a wide margin when generalizing from natural images to artwork on both the Picasso Dataset and the People-Art Dataset."

Figure 10: Task: Document image generation. The Setup and Observations are the same as Fig. 8.
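The text-rendering and document-generation comparisons above are judged visually. A standard way to quantify them, assuming an OCR front end transcribes the generated image, is the character error rate (CER): the edit distance between the transcript and the requested text, divided by the reference length. Under this metric, the "fewer than three wrong characters in over 100" observation from Section 2.1.2 corresponds to a CER below roughly 0.03. The sketch below implements CER from scratch; the OCR step itself is assumed to happen upstream.

```python
def edit_distance(a: str, b: str) -> int:
    """Levenshtein distance via the classic two-row dynamic program."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        curr = [i]
        for j, cb in enumerate(b, start=1):
            curr.append(min(prev[j] + 1,                 # delete ca
                            curr[j - 1] + 1,             # insert cb
                            prev[j - 1] + (ca != cb)))   # substitute
        prev = curr
    return prev[-1]

def character_error_rate(reference: str, ocr_transcript: str) -> float:
    """CER = edit distance / reference length; 0.0 is a perfect rendering."""
    if not reference:
        raise ValueError("reference text must be non-empty")
    return edit_distance(reference, ocr_transcript) / len(reference)

if __name__ == "__main__":
    reference = "The Last Outpost"
    transcript = "The Lasst Outpst"  # two character-level errors
    print(round(character_error_rate(reference, transcript), 4))  # 0.125
```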
+ +# 2.1.4 Panorama Image Generation + +Panorama image generation aims at creating a 360-degree view of a static scene, enabling immersive and comprehensive visual experiences. In our experiments, we select Pano-SD [119] and Gemini 2.0 Flash [99] as the baselines, with representative results illustrated in Figure 11. The comparisons reveal that while the baseline models can generate coherent panorama-like images with seamlessly connectable left and right sides, GPT-4o struggles to produce a true panorama. In most cases, GPT-4o generates images that approximate a panoramic view but still fall short in ensuring the necessary continuity across the image boundaries. We attribute this limitation to the insufficient representation of panorama images in its training data, as well as a predisposition towards generating images with a higher vertical aspect ratio rather than a wider one. Consequently, in the realm of panorama image generation, GPT-4o is inferior to the existing baseline models. + +# Panorama Image Generation + +# Evaluation: Is panorama image? + +![](images/1371bb3d27b320f551f1aebcf4ba83faae804c2369cc85b124cb8a6b9e8b9ec5.jpg) + +![](images/470abd7050918de412b06326cdd51c4cece55e61b2888569447852d1b87f591d.jpg) + +![](images/56f24172af8797ef15310df874ee2c02e4edcdae14624400e6196e1a2638ef82.jpg) + +Input Text: "Please generate a panorama image: A living room with hardwork floors, a fireplace, and large windows." + +![](images/0771e85cb3485940ea65217af54563edb5368d1ed7f290c2f9c5ad8bdccee8ee.jpg) +Figure 11: Task: Panorama image generation, aiming to create immersive 360-degree views of static scenes. Setup: We compare GPT-4o with established baselines such as Pano-SD [119] and Gemini 2.0 Flash [99] to evaluate the generation of coherent panoramic images. Observations: While the baseline models reliably produce panoramas with seamlessly connected left and right sides, GPT-4o tends to only approximate a panoramic view and struggles to maintain continuity across image boundaries. This shortfall is likely due to limited panorama image representation in its training data and a tendency to generate images with a higher vertical aspect ratio rather than a wider one, rendering it inferior to the baselines in this task. + +![](images/d163c0b8a05c2d83cbe87b0c94311aadf8a7505417a3639fcd47509e36995332.jpg) + +![](images/eba5be0e2a33e5791d2df2eae6ba493b1daf01dd71d6fba92adff187a344d471.jpg) + +Input Text: "Please generate a panorama image: A cozy study with built-in bookshelves and a leather." + +![](images/f735b915cfa2ba7a85ad7998eea453cc54fd14b2e904d305b4cea528c2185f2a.jpg) + +![](images/05f5665f542cca655067ebe6c202394a79f20e88be3af6125a418ad1ddb38552.jpg) + +![](images/ab89160a98b1c9923175245ecb1b778b6c8bedaa2a414c4e3a82178ef3554b45.jpg) + +Input Text: "Please generate a panorama image: A bedroom with a ceiling fan, gray walls, hardwood floors, a bed, and a TV on the wall." + +GPT 40 + +Gemini 2.0 Flash + +Pano-SD + +# 2.2 Image-to-Image Tasks + +# 2.2.1 Style Transfer + +Style transfer is a classic yet evolving task in computer vision, aiming to render an image in a specific artistic style while preserving the original content. It bridges the domains of vision and art, enabling applications such as digital artwork creation, film post-production, and virtual reality environment design. Early approach [33] used convolutional neural networks to separate and recombine content and style representations from images. 
This seminal work enabled the artistic stylization of photographs by optimizing pixel values to match a desired style. To improve efficiency, Johnson et al. [47] proposed feed-forward networks for real-time style transfer using perceptual losses. Later methods such as AdaIN [43] and WCT [57] enabled arbitrary style transfer without retraining for each new style. Transformer-based models like StyTr² [23] have been introduced to enhance style transfer quality and better preserve structural details. More recently, with the rapid development of image synthesis techniques, especially diffusion models, style transfer has seen further advancements in both quality and controllability. However, transferring specific artistic styles still typically requires a non-trivial amount of training data. + +To comprehensively evaluate the style transfer capability of GPT-4o, we conduct comparisons against several recent competitive models, including Gemini 2.0 Flash [99] and Midjourney v6.1 [75]. Specifically, Figure 12 illustrates style transfer results for natural scenes, while Figure 13 focuses on human facial images. Across a diverse range of styles, such as Monet, Van Gogh, Pixar, Cyberpunk, Snoopy, Disney, Ghibli, and Cubism, GPT-4o demonstrates consistently superior performance in both stylistic fidelity and content preservation. + +Notably, in the case of Ghibli style transfer, GPT-4o exhibits remarkable fidelity to the original artistic aesthetics, closely resembling the target style with vivid color palettes and soft contours. In contrast, both Gemini and Midjourney often produce inconsistent visual styles and textures. Furthermore, GPT-4o excels at preserving fine-grained content details, such as facial structure, earrings, clothing, and hairstyles, which are often misrepresented or lost in the outputs of other models. These results suggest that GPT-4o not only captures high-level style semantics but also maintains strong spatial consistency and semantic alignment. + +# Prompted Stylization + +# Evaluation: Consistency/style. + +![](images/0fe2326eaaff906398f84779d9f2f8a9b656dcf843adbe8b635b0ebccc3b64c1.jpg) + +![](images/41dc891dc5a7c3692e3d168e6b8d7aeecf6ee926ceb54032fac4c7482b335993.jpg) + +![](images/275a5a4359f968af20b14db343e03cc70f52a75880ac223fb413656701f9803b.jpg) + +![](images/87edc272818725170e7b6db73459109f3c3967fc70ae14b1e2acffc417d4290d.jpg) + +Input Text: "Generate the Monet style of this picture." + +![](images/750636006f7b07b0af11afa452e7037a2d9a06614ababcd014be1498c416fd65.jpg) + +![](images/1ccf7d1613f12e0b81fba30f86e01357733d489b633304f1597188ed4f258e67.jpg) + +![](images/a8a15f935780009ae4408165cc5d5abf2965efa5cead6b31539ab6f6570babf3.jpg) + +![](images/577ab214cdb8de2d03a7a2be9b878ed69afb604d28c0d21381d8da756093e973.jpg) + +Input Text: "Generate the Van Gogh style of this picture." + +![](images/c464a9f6f3f24ab0f65f5488c132ccea546a4af9ede651088ae6e7a2961516ec.jpg) +Figure 12: Task: Style transfer, aiming to render an image in a specific artistic style while preserving the original content. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and Midjourney v6.1 [75] on natural scene style transfer across multiple artistic domains. Observations: GPT-4o exhibits significantly better content preservation compared to Midjourney v6.1, maintaining fine-grained content details and structural consistency. In terms of style, it faithfully adheres to the textual description, effectively rendering vivid color palettes and soft contours that characterize the target style. 
This alignment notably surpasses both Gemini 2.0 Flash and Midjourney v6.1, highlighting GPT-4o's strong capabilities in preserving content and faithfully rendering diverse styles. + +![](images/67dd368a9fc83144d81f03926e3e616fb1c6241004b301cf8c36f99e0c45dd8d.jpg) + +![](images/badd5fb657f99aaab26d4d7e7efa36a6fb517d65faf566ed3fe8c5ae8f6f35d8.jpg) + +![](images/598045ae955aa43906186813c0efdb19e703aeb859e6842ca9badf105529c011.jpg) + +Input Text: "Generate the Pixar style of this picture." + +![](images/f68ac7b3f9c3b4527403386e94ea70946346ec05ae2dde329bd666632c16d005.jpg) + +![](images/de2131f5c3c16b1cda2aa0dec3ae6fe0689fffc4267105db8583bef2e64b0cad.jpg) + +![](images/a85c872e242b8a76e3a8010c80983519f0fb6a8346e50773ef2e7fab0b390d2a.jpg) + +![](images/1f67f57b31e0864c333e12afce49cff3e3a9e54902f8fabbbf522d8e1b2bcb07.jpg) + +Input Text: "Generate the Cyberpunk style of this picture." + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +Midjourney v6.1 + +Prompted Stylization + +Evaluation: Consistency/style. + +![](images/7b58c3184e1b97ea204211d53437c27406bef28c23fb6a50586f6c0f1a5bfe24.jpg) + +![](images/4a7dfd47b3b4f45d4ea17721125d0fe632c1243e9f989c4ef96f7157ca1eac3f.jpg) + +![](images/1a4e0134e2181e538cfa34d801a9a29589af46e4564af10bde2b0a48bdef3123.jpg) + +![](images/0e6d7e6671a48677bfae227082d4c8fb8b48df29d71b55960f2c402f7f628630.jpg) + +Input Text: "Generate the Simpsons style of this picture." + +![](images/9e5e6d93ce20c260e565400b12fea89bd8bc529187af868dd712070ab1c328f8.jpg) + +![](images/8e5d8cc638f17208ad4fb443ff0683574d425e64d854400f82678e0227eca37e.jpg) + +![](images/aa5d8bd6655c3924c3860521fa7d3aabfae95679b8638674f96b892057031bdf.jpg) + +![](images/0459972536685fbb8896000173723ed46bcf8309ab5dd8cd3867ab99d79da23a.jpg) + +Input Text: "Generate the Snoopy style of this picture." + +![](images/70346a54c2888c50ce4473234e1e5a51c19e9b3df375364d24bb2a98a5a7153a.jpg) + +![](images/959c69e3cc69906dbb804b63c7b374843496df51ef6f1569b100b6937422b431.jpg) + +![](images/fc8ea4a8390ad6f9e57e8f5aab23790b3bd6070cec727042772246ae96706efb.jpg) + +![](images/4c59a95a5b0ba1c9ed0c407b8724156fd81dd1c21744e6e8075190bae52e10fd.jpg) + +Input Text: "Generate the Disney style of this picture." + +![](images/4dbac1245c6ad15005db4a1d3ccb5720d30e6be34a344f45e008b6f608fc4cb6.jpg) + +![](images/c9580af54f8032d511fbb97fa1439c5960b355c6d89a6da9548e03d6da674129.jpg) + +![](images/a24d3f139726d7234b378db7110157186e07c2e355a0860dbbefa990e5431c62.jpg) + +![](images/1c79b6d712c77204bf93747fafc74f99e0ed9cd9216c159ddbe9f67834fa0ee5.jpg) + +Input Text: "Generate the Ghibli style of this picture." + +![](images/899f06b2c3cf50b2d39323fff3222f827df1c1eb152fa1ed8c87460c8158613c.jpg) +Figure 13: Task: Style transfer, aiming to render an image in a specific artistic style while preserving the original content. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and Midjourney v6.1 [75] on human face style transfer across multiple artistic domains. Observations: GPT-4o exhibits significantly better content preservation compared to Gemini 2.0 Flash and Midjourney v6.1, maintaining fine-grained content details and structural consistency. In terms of style, it faithfully adheres to the textual description, effectively rendering vivid color palettes and soft contours that characterize the target style. This alignment notably surpasses both Gemini 2.0 Flash and Midjourney v6.1 by a clear margin, highlighting GPT-4o's strong capabilities in preserving content and faithfully rendering diverse styles.
+ +![](images/eae0730d55066371b3326e9ed3ab31f01ca11d2267e31b1a8c17fc7663f2714f.jpg) + +![](images/d7a7f1fc31d3a9490433f88485c2a950c61d2d98019f2ef1f9666ccb7987ad48.jpg) + +![](images/c65cc8644013933cd8a0d822cb884ca3b9ab0b6f9e149296ab44724e475fa681.jpg) + +Input Image + +Input Text: "Generate the Cubism style of this picture." + +GPT-4o + +Gemini 2.0 Flash + +Midjourney v6.1 + +# 2.2.2 Image Editing + +Image editing involves modifying the visual elements, composition, or data of an image to achieve a desired outcome. This process can range from minor refinements to significant alterations, while maintaining the integrity of the original image. Over time, image editing techniques have evolved from manual, labor-intensive methods to sophisticated AI-driven approaches. Prior works [10, 30, 9, 120, 5, 29, 4, 40] have demonstrated the ability to perform various editing tasks based on textual instructions, such as adding, removing, or replacing objects; altering backgrounds, colors, or styles; and adjusting the number, size, or positions of objects. However, these models still exhibit limitations in certain scenarios, particularly in preserving non-edited regions, maintaining consistent image characteristics, and ensuring seamless blending between edited and non-edited areas. + +We compare GPT-4o with MGIE [30], LEDITS++ [9], MagicBrush [120], and Gemini 2.0 Flash [99], which are representative of current SOTA methods. These experiments evaluate GPT-4o's subject preservation and instruction-following capabilities to determine its effectiveness compared with existing methods. Comparative results are shown in Figures 14 through 19. We find that GPT-4o achieves performance comparable to, and in many cases surpassing, SOTA baselines in image editing tasks. Across these examples, it exhibits the fewest failure cases and consistently outperforms the baseline models, demonstrating strong generalization across a wide variety of editing scenarios. We highlight several key observations: + +Strengths of GPT-4o in image editing: + +- Fine-grained editing: GPT-4o shows a superior ability to handle fine-grained editing tasks. For instance, in example 2 of Figure 14 and example 1 of Figure 15, GPT-4o successfully modified small, detailed objects such as a toothpick and pink ballerina slippers, outperforming prior methods. +- Substantial image transformations: GPT-4o excels at large-scale edits, such as background changes or object transformations, while maintaining visual coherence and realism. These complex edits require robust contextual and semantic understanding. Example 1 in Figure 16 illustrates GPT-4o's effective handling of a major background alteration task. +- Subject preservation: GPT-4o demonstrates strong subject-preserving capabilities, avoiding common artifacts such as facial distortions or component loss. In example 2 of Figure 14, GPT-4o retains the content of a drink that Gemini 2.0 Flash erroneously altered. Similarly, in example 5 of Figure 19, GPT-4o best preserves fuselage patterns and textual markings on an airplane. +- Instruction and original image adherence: GPT-4o shows a notable ability to follow instructions and maintain the structure of the original image, particularly in style editing and tasks involving object quantity, size, or position. This likely stems from its advanced understanding of both the image content and the editing instructions. For example, Figure 18 demonstrates GPT-4o's capability in style editing.
Example 2 in Figure 17 shows its understanding of the term "orange" in both textual and visual contexts. A similar ability is illustrated in example 4 of Figure 19. + +Limitations of GPT-4o in image editing: + +- GPT-4o underperforms in scenarios where strict preservation of the original image's lighting, shading, and color tones is required. In such cases, the edited images may exhibit noticeable shifts in visual consistency. This is evident in examples 1 and 5 of Figure 14 and example 4 of Figure 15. +- In some cases, GPT-4o may fail to retain image details outside the intended edit region. For instance, example 4 in Figure 14 shows a degradation in image quality in non-targeted areas. + +In summary, GPT-4o demonstrates substantial advancements in image editing, showing exceptional capabilities in detailed and large-scale edits, subject preservation, and adherence to instructions. While there are limitations in strictly maintaining original image characteristics such as lighting and tonal consistency, GPT-4o significantly reduces failure cases and outperforms existing baselines across a wide range of editing tasks, pushing the boundaries of current SOTA performance. + +![](images/cde0ac3d92eb933391cdf6879af267478293459d8efb075cb2c215e135eaf5de.jpg) + +# Evaluation: Instruction-following / faithful. + +![](images/413f16191d537cd336a6645867fb4044e2d27954d8e97a6f61443fdadd988968.jpg) +Input Text: "Add a notebook to the desk." + +![](images/e6edd2726b0a0b32b22ba7d28c24eb54d68706defe2d03d01109b73577922099.jpg) + +![](images/2b5cbcdbe86ff5d8016d28bf0a81a16d30075a3c50e64b84bfd2e7a4f9dbfb54.jpg) + +![](images/9b6616b3e3e491d5719d2c2596fedf5f8396a133bfd0edae1a8a64d343c4cd8c.jpg) + +![](images/e1866d261c2cafcac4f4dbe2ed648662bbfe95d442c888ae66ee515ecf40b804.jpg) +Input Text: "Put a toothpick in the top of the left sandwich." + +![](images/b169d0c531726750bbaeeb2fde03b21e893573395b4260fafdf640d946589a90.jpg) + +![](images/f8056f09456f348f5dfebcc2e29caddc6eaffd73644a4b2e58c236fd4f647220.jpg) + +![](images/58100ddfbfd9cfbbcbe07188ce06d08a6990468de6ef91ce1596ca7e44f0513d.jpg) + +![](images/ab999568dff4abd05c3fdb65bddbc6ea19103d123770203b58e1c0014cd42fd2.jpg) +Input Text: "Change the goats into moose." + +![](images/b33cf7d522604acf3a5bd5c343b02f71b892c5585b03a7601c2a93da774f7f45.jpg) + +![](images/0f95244911c8dc29653de6ffbe04b37fc4545e698fba41f0e9c612f8ef2eea1a.jpg) + +![](images/c13377bc306094edb2168ae5ee5698264067e0fb2c892c3a361247e6688d7d37.jpg) + +![](images/454c99016219342e261335a7e82a9c5e7a05daa487fd74aa7d33328c05c7b4b4.jpg) +Input Text: "Replace potatoes with baked beans." + +![](images/2f5b1c3c3a38c0ef1182e319f7bb487e17c8dec0cf207f9ddae29bd0055c93dc.jpg) + +![](images/33b1124e10743638cb062747979a788b7ede97dab752dac0e2e8cac1db1a2516.jpg) + +![](images/460d34550756a1743afe1027e9058e86963ba9586255d860b0f2b1c270b9280e.jpg) + +![](images/bc1073fb4ee6a5f6762fbba75598ee620fd0b2f89f1eb8f7b28e02c9a973045e.jpg) +Input Text: "Change the fire hydrant to a parking meter." +Input Image +Figure 14: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: GPT-4o achieves higher success rates than MGIE (examples 2/5) but occasionally alters unintended elements (bread in example 4) or lighting/shading structures (example 5). This likely stems from stronger generalization capacity and creative adaptation focus in training, though reduced fidelity suggests insufficient constraints on structural details during fine-tuning.
+ +![](images/448a4c7fafc6c14e57593c609ff7476f3fc55ea22624bc38b74099fe39fc507a.jpg) +GPT 40 + +![](images/c5822478e8f8995312ffe08cd952a629abcc29449b18ab827236ad66d1633b87.jpg) +Gemini 2.0 Flash + +![](images/daa8c1bb0acfd6e425d089f419400308811f1212d5b44f851d12ef8b15bbe500.jpg) +MGIE + +# Image Editing + +![](images/2929ca2301ebf3a6fed0fa5d79fbf4b686d60eeaac8dec6a4e0cf76ff49e665e.jpg) + +# Evaluation: Instruction-following / faithful. + +![](images/93239b7986d7e146b41b57dc317de85153c1f5d166ec11db11dfd7fe2702e17c.jpg) +Input Text: "Turn everyone shoes into pink ballerina slippers." + +![](images/ab95d11bd4370f4489f59f00bf95b0420cf3629bf46e660886023f3f96118d89.jpg) + +![](images/6cecd99cb28dc56f82b143d8d1bfc81a9ffb34558b21ff22a2629368564a9e5f.jpg) + +![](images/c8841acf64bd37b3e001194394bdc34bdbf3ce6e360349fc571e6d6f84b0e03a.jpg) + +![](images/5ef2471bae5d6ac28f80fd9606df44d177b953395bf0f4d7328ae08ca174b1ee.jpg) +Input Text: "Remove the fence from in front of the horses." + +![](images/530cb7230b747fa4e841052b66f6154fe575e082392dc9ebee75fb61c0ee6728.jpg) + +![](images/1c82b96334f0b7ff5da3c6643db4451a39f618d24f04fa9a677945a841872526.jpg) + +![](images/70e41d785dd49e7ac3f98b75bebf61abc711768215fa3f8fc2105d70e0467d9d.jpg) + +![](images/2884734e16a9fec622bef494287b9ffd3440adb4d2a9f14797db74ec8bd44225.jpg) +Input Text: "Remove the baby elephant in the picture." + +![](images/c4aa3522a28b388224935e752868d0c11d026b182a9b1476c099f85eecf8b27d.jpg) + +![](images/6bd024f68b4e3ada55b3eb581d5cca2d29125289402827758186fcce06573946.jpg) + +![](images/2865dc07722a294ea85dfd9841947ab7e799e3d91a0f4a9092b0ff0c6fddd5ac.jpg) + +![](images/abf7f3e688bfa59a5ecc1db78f61491254ec021fedeea5c85187c6562809d61c.jpg) + +![](images/625aa6127eca041e5810dc4b46c00716834bba9bb7fb4e2613067d7135aba20f.jpg) + +![](images/fbfad470b3b72d3478e91210af29255466f94c98c96fb529b634e931bd2f9848.jpg) + +![](images/5447a1f82a340e52710bd0c108dac73285aab065976ccc90d3243a192c03666f.jpg) + +![](images/013887f45aed224843b4df7ea2aba71451637e932d1143fbfcacbd27271f65f3.jpg) +Input Text: "Change the yellow hat into a cowboy hat." +Input Text: "Remove the people from the background". +Input Image + +![](images/8acbf5757555da90dab4610d0040e9baac69b0e9e18c478fba61f00f601266a2.jpg) +GPT 40 + +![](images/ef4e183b7dfd1b0ebe989bb80028076acf9467e603aef017798bb91bb385489e.jpg) +Gemini 2.0 Flash + +![](images/29d11d5b373c8c6684c115fc01b10b1a340f12dbedfc7dfb4b61051e6911c7fe.jpg) +MGIE +Figure 15: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: From examples 1-3, GPT-4o shows higher success in fine detail edits and large-scale edits with occlusions. This likely stems from GPT-4o's stronger contextual understanding and ability to infer missing or obscured elements, enabling more precise localized edits and coherent large-scale modifications even with partial visibility. However, it sometimes erases non-target elements (e.g., the house in example 5) and significantly alters global lighting (example 4). + +# Image Editing + +![](images/a5f0857371c912d73812959c5154aa5f588c9c53b0cac8d82d05e68ed4b24ba2.jpg) + +# Evaluation: Instruction-following / faithful. 
+ +![](images/7fae08c7d278ec0abfd7dfa99c03539fe6218331e03f2ae1578f75bab7ed9747.jpg) + +![](images/7b6fe8843d08c99ead6fe3a01df0d1b2466ad14914ede1de9b670e85d3294614.jpg) + +![](images/18ac5988622ec524574bc3cacfb9b7b32ff24f0458ab883762dd159f18758912.jpg) + +![](images/64f0d91151654ae4516f7d303ba48c4e01706725697a79b75bb9494acae439d4.jpg) + +Input Text: "Change the background to the set of a nickelodeon game show." + +![](images/577dde2d7debd868c1455d80030c181d453b6b7c4530b75d54e42bb0bc034596.jpg) + +![](images/03db7b6af0e509243f783b8ce2d6a7d6f8244e6d1c3cca245f733119d0adc4d8.jpg) + +![](images/526a547e833ad8f409b85457a4b260fbfa64463b2a497a191035f2bdb24ac6b7.jpg) + +![](images/777c4e2019957c2a3491ab6ec8e5dbcb0731c4d26a35844fe6edc39ec5266953.jpg) + +Input Text: "Have the dog prick up its ears." + +![](images/4717d46544cd52b4e89e284f9ced1f798ee955d4101bcead34498865073ada80.jpg) + +![](images/f8b04a681ef9c0a1c0fb445e1ea16e42cbc375d735002dad0a66db67be113463.jpg) + +![](images/67a212cd49f8bf5728997494e42d4d92f076bc6f5c1e66b1dd42cfdcaaf6779c.jpg) + +![](images/8f600bc759b57a060077f8d020e12edc5a0d30fdd50d923a4ba69ef3ab7e3b8a.jpg) + +Input Text: "Have the elephant's tail raised." + +![](images/3986b51693c8099322835c4b144aa50d43241eafeb2e294ac53df6b06556a42b.jpg) +Figure 16: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: From Example 1, GPT-4o demonstrates superior performance in style editing, effectively interpreting style instructions and preserving global image structure—a capability lacking in baseline models (MGIE, Gemini 2.0 Flash, and MagicBrush, as will be shown later). This likely stems from its stronger cross-modal comprehension and structural awareness during training. + +![](images/f91e7c322f8dc4a9e7e58df1c8b1d035d289d156992fbfb80f834d5f06e3e4d2.jpg) + +![](images/9ff188c0ba980c8e75a69ac4f116a1d53d641501e8a2976781fdf333d07f29ca.jpg) + +![](images/a179e4417d79d0b36fecc454a7c170f44da2165e686f30f95e1e62f5dacec7e9.jpg) + +Input Text: "Change the background to Vatican City." + +![](images/15fc7c5a66f48467e502347e843669f0689718ebbba95832bb0c704a6633d0b7.jpg) + +![](images/285c8caa7cc67cd2c1b2e70ff31cb3ba4484255739d14c8d701b34ac2e79ec36.jpg) + +![](images/aff34b2fc3ec9bfe6b6d30adc9b2581aab5368e8efd4ea317e48bc4626796721.jpg) + +![](images/e70d73a385cda6554925808b97816d9c1e848d834e5052992231cc4954cfd336.jpg) + +Input Text: "Change the background to Mount Rainier." + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +MGIE + +# Image Editing + +# Evaluation: Instruction-following / faithful. + +![](images/0b872b88a7a28e69a1daae9c6423253e4db8e407800572b004fd616d814d9b24.jpg) +Input Text: "Add a white hat to the woman's head." + +![](images/2c9aaa16beceac6929cab3c491e5386e4870c1879821502bc939b57f21a74ac6.jpg) + +![](images/63887670dc86e68d25f445742e627570bc0bce4be5ac6001afe0c309c31c6b0a.jpg) + +![](images/f1c73606a27398749d1cdb7f461d7eb75573cba818d1bde958bce44ebacb6dbd.jpg) + +![](images/3fae030462e9df055665edfa8c3602ad144dba999e49b471e3dfd45d576c435a.jpg) +Input Text: "Delete the oranges from the shelf in the image." + +![](images/bff611326012e121ed651fc6f65158c4598f63fd2b8ac3ecac83f8c3ed9bba15.jpg) + +![](images/40c5465d2843a6e6f41aaecc277b509f4346d85fd64000b3badbfe3d66eb079e.jpg) + +![](images/784eb3154634368e8aff984cdd03ed74a47d81368a315b4c0bbe827a0051146d.jpg) + +![](images/2893eb40cbf1268dab0c08335defc17fff61e8a62c9f7f88cbb40f2797ff174b.jpg) +Input Text: "Get rid of the water the elephants are walking through." 
+ +![](images/136b9b62bc4d4cdfd994c33d5283fe146823a2feb7d76000b30d2a22fbb628a3.jpg) + +![](images/48b98ec70c96ab029b0a162958ffc7baf258a979ab48209783328fd17cc0c608.jpg) + +![](images/0aa8c1c419160769a182de4039193ca181bf320312a187d89844da7d1bcd55dc.jpg) + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +LEDITS++ + +![](images/d4b9f29b0ab478363d67fd9b4ba47dd4d96e7162137bd71d2ff0b6ad6599db77.jpg) + +![](images/1d87a3ecf1aa5cd61e4772d303263ffdce0edea148e97f37a796904330cd58ee.jpg) +Input Text: "Show the seal raising its head." + +![](images/69763d65a9a155bb94fbc23e0c07823b4fbfe54ae0150f8bf1838f34853a3bce.jpg) + +![](images/c55a85d62ceb01b432fd7559421cb62d05bd7e42574b82a7254f0ddc252a5fb1.jpg) + +![](images/c8b6d0e8d66c6539cd6cfb4284248840031b6937e228c7f897330acc92dadf26.jpg) + +![](images/f206c5532d96bfcc1cf723bce40c254819c6dcc8f7c3dab25bd20471924a9c5d.jpg) +Input Text: "Change the sky to stars at night." +Figure 17: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/LEDITS++ [9]/MagicBrush [120]. Observations: From Examples 2 and 3, GPT-4o demonstrates stronger comprehension of instructions involving 'the oranges on the shelf' and 'the water the elephants are walking through', translating this understanding into more accurate edits. This suggests better grounding of textual prompts in visual context during generation. + +![](images/f0852ac207fe5ce1fa4118aefca299ece81ad07b3b1dabb16edd46a65c5a542c.jpg) + +![](images/4f92cfa09792f7a936729795c4faf6f6742488baef7a2394db6b9a5dc3b73e04.jpg) + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +MagicBrush + +Image Editing + +![](images/8b47755995c00dd9ee49f071a5906d93f0c148923ccaa69fb15f0c73140fc71c.jpg) +Evaluation: Instruction-following / faithful. + +![](images/f7744cbca6f65d6b7b76f3ed15020d3fc9b6f8ed98f636028e5ced70384c52bd.jpg) +Input Text: "Change the image to a 1950s Flintstones cartoon art style." + +![](images/c667c273e0d72e65f57befa30d7eac7de39e3dd5500f8b8982bf964a2af0bfd0.jpg) + +![](images/a52dd1eb6b418ed5ae687d895e95de05feea7dd01a9e70453d3ef0959530bc3a.jpg) + +![](images/58dcd2fc462e78a9dc9f7d7d52dd5084aa6aafb0d48039e3f826d0102bca067b.jpg) + +![](images/55dc8086726bc727505f262393f9f58520886c8655e9a928d6c1c6186955bbfd.jpg) +Input Text: "Change this into a cubist painting." + +![](images/d6c8b55a284c54268e71caa1c9af85186c073992a08e7df944a4827df416126b.jpg) + +![](images/208b4450c1e449eb5c1aec21ac1e69f0930c6ad52d64be0770fb28058b41a6de.jpg) + +![](images/c7a9ad7f6f46ee29f8dc6c06aea221cccb760335b906b633f9630413215ff700.jpg) + +![](images/28ce32f62ab276444f6b71d92bf45eedf3ce7200dfed382d7460f96631f73071.jpg) + +![](images/662a88ccc330476d2b5521ea30b676e65a0af1c7ce47b24527e5328cb5d23989.jpg) + +![](images/4f6fc9b2404193b922604fdb04aa7ba553fa2808adea7ae850000fdbcf1459d2.jpg) +Input Text: "Make the image appear as if it's a woodblock print by Hokusai." + +![](images/3ae48117c4aba996216a7bc491ef96a7ef76812eb31cfce77a2b900f543504dc.jpg) + +![](images/e58eec2c9254e2da8414f358c4e5148a84d45f6a54957dab8cb6f162f5233635.jpg) +Input Text: "Change the background to Fushimi Inari Taisha." + +![](images/cca5f714390c8193eca135b0bd9a7e4da0d407fca7b6a93aba9022833c487d0b.jpg) + +![](images/742bbe6bc7ddca81a49887d3b626885d9ffe8e90e72e044edaaa388cf541ef05.jpg) + +![](images/2c704075d95d45fe4f0f7a7b678bfc45ee73af1834623f4fe6e02497499106c1.jpg) + +![](images/7397b99d3dbd4f1f9b6ee30e701e62c0f7e12919bfa41da478790936477b2c34.jpg) +Input Text: "Make the image appear like a Rembrandt painting."
+Figure 18: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MagicBrush [120]. Observations: This set of examples further demonstrates GPT-4o's robust capabilities in style editing and background modification, consistent with the findings previously presented in Figure 16. + +![](images/f4667249d38b5a17f284e530b43ccc19dae4b4df74a383b000d041b25e9cc575.jpg) + +![](images/a0db75620e3e1969b865f1e6ae4a41bb07983621337b59c87edc39c85287a2dc.jpg) + +![](images/5f20c93306fe70d43acd29c720c491e8d5d808966bbaa918395e1a87b2755d64.jpg) + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +MagicBrush + +# Image Editing + +![](images/fd02f523b32582d773f60da26889163fcfeb85e2ad61c1cbbf337661f949aa5b.jpg) + +# Evaluation: Instruction-following / faithful. + +![](images/cdc522f7841c2077f741a6f72a86ede9c15c561e6b84d17afd15ae44085b29cf.jpg) +Input Text: "Make the image look like a cartoon." + +![](images/d89d446208b65160e03ce4ced290520055aa6e2461f4822a704190db34fdc06c.jpg) + +![](images/ae6de595371f09d4b568904e9ac0c1c094ae221ff86659caaecc10d0524ac9df.jpg) + +![](images/bb239bc12ad173112db53edc8c9ee898e6ae74bcd56f4a9d4d2f4e2aa118d2f3.jpg) + +![](images/777ec73ae37e58c0b5c2111e81cad23a6a071d747be202d05b8e482979d29b98.jpg) +Input Text: "Change the bike frame to be shiny metal instead of red." + +![](images/e4187d7ed6a1b514ec5676db3d2003f937bfa937564597d410716eae507bb1ca.jpg) + +![](images/eb16bd445147255cf3980142ad981069b415244e78b0a5cc390ff3c9e7af61b2.jpg) + +![](images/7b45aa773deb1657966b4ed79bd2b961c599f2a4ba279796de04e8ddce076662.jpg) + +![](images/6a5555b889ead9b7c54a679f8554efb5478d55c2bbab8a2783cd24e9f0e28abb.jpg) +Input Text: "Change the table color from blue to black." + +![](images/c86c4b988057b5e5ca08b92e4c09d8111a097f908d27efcd8da7c2ac29c5583a.jpg) + +![](images/fafbcde737a7a7fc8337bc987984a76a03a10ed89b37798dab11796bc6e03f52.jpg) + +![](images/d02f5e582a519b9d44e58b3ed2a49efbf23380fc85233c22949e12e735fc0378.jpg) + +![](images/ee337d58007e700256aa61fb4d68f998f7a15c4a2662d8db4f8d622da1b1e1fb.jpg) +Input Text: "Change the woman's hair to be all blue." + +![](images/c9364ac74a276a68b6a4096cb5d76160b136e7879dc4af294489f3c2d5723738.jpg) + +![](images/91b32ed2b4041faded4ca926aa6a533ba22ec3c950274f75ac5ae3556ef420f0.jpg) + +![](images/50fb798f21796fa55105c09f2e99d39e009e8f0617af0664a4836dbe70e314d0.jpg) + +![](images/91219e5a27a8f4d0eede45256f0b86e5561a80dee1a5c603cab39235011f504d.jpg) +Input Image + +![](images/7e6bb403295173dc6c676365988dcd5fd4a615b8d4e09058c0895add12fbd47c.jpg) +Input Text: "Make the color of the airplane be yellow instead." +GPT-40 + +![](images/2c0a4cd420ef9d5e4634c2962449526f8adf2f9dc3c5b4ca6622d4b56c4d08ad.jpg) +Gemini 2.0 Flash +Figure 19: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MagicBrush [120]. Observations: Example 4 highlights GPT-4o's superior image understanding—accurately distinguishing between hair and a scarf (where MagicBrush fails) to execute the edit. In Example 5, its precise retention of the plane's logo and text further demonstrates robust object-preservation capabilities. + +![](images/217541d90806a38fd995d3e22236061f85eb7bc63300a2acde2bd0f1910bb8aa.jpg) +MagicBrush + +# 2.2.3 Customization + +Customization, also known as subject-driven generation or personalization, aims to enable visual generative models to generate visual concepts from given reference images. Initial methods [31, 91] have achieved this by optimizing text embeddings or model weights. 
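To make the embedding-optimization route concrete, the following is a hedged, heavily simplified sketch of single-concept customization: a single new token embedding is optimized so that a frozen text-to-image diffusion model reconstructs the reference images. It assumes the Hugging Face diffusers library and a Stable Diffusion checkpoint; the placeholder token, prompt template, and hyperparameters are our own illustrative choices, not the exact procedure of [31] or [91].

```python
# Hedged sketch: learn one new token embedding ("<my-concept>") while keeping
# the diffusion model itself frozen.
import torch
import torch.nn.functional as F
from diffusers import StableDiffusionPipeline, DDPMScheduler

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
tokenizer, text_encoder = pipe.tokenizer, pipe.text_encoder
unet, vae = pipe.unet, pipe.vae
noise_scheduler = DDPMScheduler.from_config(pipe.scheduler.config)

# Register a placeholder token; only its embedding row will receive updates.
tokenizer.add_tokens(["<my-concept>"])
text_encoder.resize_token_embeddings(len(tokenizer))
token_id = tokenizer.convert_tokens_to_ids("<my-concept>")
for module in (unet, vae, text_encoder):
    module.requires_grad_(False)
embeddings = text_encoder.get_input_embeddings()
embeddings.weight.requires_grad_(True)
optimizer = torch.optim.AdamW([embeddings.weight], lr=5e-4)

def training_step(pixel_values):
    """One denoising-reconstruction step on reference images in [-1, 1]."""
    latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor
    noise = torch.randn_like(latents)
    t = torch.randint(0, noise_scheduler.config.num_train_timesteps, (latents.shape[0],))
    noisy = noise_scheduler.add_noise(latents, noise, t)
    ids = tokenizer(["a photo of <my-concept>"] * latents.shape[0],
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    return_tensors="pt").input_ids
    pred = unet(noisy, t, encoder_hidden_states=text_encoder(ids)[0]).sample
    loss = F.mse_loss(pred, noise)  # standard noise-prediction objective
    loss.backward()
    # Keep all pre-existing embeddings fixed: zero their gradients.
    mask = torch.zeros_like(embeddings.weight.grad)
    mask[token_id] = 1.0
    embeddings.weight.grad.mul_(mask)
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()
```

After enough such steps, the prompt "a photo of <my-concept>" can place the learned concept in new contexts at inference time; the weight-optimization family referenced above instead fine-tunes the model itself rather than a token embedding.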
Subsequent approaches [50, 36, 46, 125, 94, 129] built on these methods to handle multiple visual concepts. Customization plays a crucial role in making visual generative models more flexible and applicable across diverse domains. By empowering models to adapt to user-provided inputs, it ensures outputs are tailored to specific visual concepts. This is particularly significant in industries such as artistic creation and advertising, where individualization and creativity are paramount. + +To evaluate the performance of GPT-4o in this challenging task, we collect reference images from previous relevant works [130, 103], and conduct qualitative comparisons as shown in Figure 20 and Figure 21. For single-concept customization, we compare GPT-4o with Gemini 2.0 Flash and DisEnvisioner [130]. The results demonstrate that GPT-4o not only faithfully reproduces the visual concept from the reference image but also accurately adheres to the given textual description. In this task, GPT-4o significantly outperforms Gemini 2.0 Flash and achieves performance on par with the SOTA customization method. However, the images generated by GPT-4o still exhibit some "copy-paste" artifacts, leaving room for further improvement in the future. For multi-concept customization, we compare GPT-4o with Gemini 2.0 Flash and MS-Diffusion [103]. In this task, GPT-4o can still achieve competitive results for customizing multiple visual concepts in different contexts. Unfortunately, it struggles with certain unique combinations (e.g., making a dog wear a human dress), which could be attributed to the lack of relevant customization training data. + +Overall, GPT-4o demonstrates impressive performance in both single-concept and multi-concept customization tasks, showcasing strong concept fidelity and strong text alignment. Despite some limitations, GPT-4o achieves remarkable results on par with SOTA customization methods and outperforms Gemini 2.0 Flash. + +# Customization (Single concept) + +![](images/646e79ac9f1f81f041625ce775812c9530257492f90e5afea41349ce5cd6894a.jpg) + +# Evaluation: Corresponding visual concepts of given reference images. + +![](images/7b9ec0dd32124c06ec282d499d80f72d58efd6c6b8339cfbdfe41eea456580d6.jpg) + +![](images/29e4e012635440b4b8fd5440387c6bcf7b8f25f3e0e7c0ea56e381f52b1a1451.jpg) + +![](images/f596fda615ad13c09d3de7a59c67e2adf0d99aa3880879a7dfca06b8ddd317e8.jpg) + +![](images/ed7f35bce8d09ae0a591d4b7e14edd5bbe5913c727af2763219ee8eccf546612.jpg) + +Input Text: "A dog on top of a purple rug in a forest, with reference to the attached image." + +![](images/c337d9db266ab1888bf435abcc2ea4dfede05853a0b6a6b5906b13f424c95dc6.jpg) + +![](images/be6c16bf461ad6ab94168d3e79ef58fabfe4403b526f0dc3a29bc696d3d3531e.jpg) + +![](images/58f6525436f8706212ce32abd6aa2f4ba0d58a7cd53654d0f24fecf81b3fb9cb.jpg) + +![](images/b05322a8b0f75be84fd68e7685029fe222a8456412836c464982ecfc2b379f16.jpg) + +Input Text: "A cat wearing a Santa hat, with reference to the attached image." + +![](images/2766a0e15a98adf29692d023a9e4c39fef5c3e5ec591c397ca4c3600d9eb3690.jpg) +Figure 20: Task: Single-concept customization. The goal is to generate images that faithfully reproduce a single visual concept from reference images while aligning with a given textual description. Setup: Reference images are collected from prior works [130], and results are compared across GPT-4o, Gemini 2.0 Flash [99], and DisEnvisioner [130]. Each row includes the input reference image, text prompt, and the corresponding outputs.
Observations: GPT-4o demonstrates strong performance in faithfully reproducing the single visual concept with high fidelity while adhering closely to the given textual description. It consistently outperforms Gemini 2.0 Flash and achieves results comparable to the SOTA method DisEnvisioner. However, some generated images still exhibit minor "copy-paste" artifacts, indicating room for further improvement. + +![](images/b6d6e46c7e1fd50306d7fb544b81d3bcbe88cbf2672995aac7590c083fadec72.jpg) + +![](images/decdf079eb5a7ceb8546733501c7687db436872547911348307fba42a262e86d.jpg) + +![](images/27b3efd0800340d8acae187e104fb4c863298773ee0e13233df9fd4a8ff2e810.jpg) + +Input Text: "A pair of glasses with a tree and autumn leaves in the background, with reference to the attached image." + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +DisEnvisioner + +# Customization (Multiple concepts) + +![](images/9f7208b84dbf5365d061b28896f055f01299d12b30af1dee82093163431a17e5.jpg) + +# Evaluation: Corresponding visual concepts of given reference images. + +![](images/2982b017c59027bd916adf4c71fec2ef4c7b20ea6fcb1b3e05ccead2d64e7e70.jpg) + +![](images/a43b10404face144826ade37cce23e7383b09b5f6616cd1ba4fb27d65847035f.jpg) + +![](images/08504d0ac60ae0b5850f6667bc744a4e3d743395a161f0de6a775051bb6f7278.jpg) + +![](images/6217bbdb7e9c6d3d88bbced40c11ad8ad80d6cc7c0af8b951f80ab29474cc67e.jpg) + +![](images/b359a631d7b7dee251d05aec14e46acaab4300aca8d16ae17240156e943e2d83.jpg) + +# Input Text: "A dog wearing a dress in the snow, with reference to the attached images." + +![](images/7e7b185220c6a783809189a0317a2ebff5e610e53e31f229bc61cc66cb9833ff.jpg) + +![](images/cb7b5802882687334bb83b37842a711b7d6066ee9b162b7773203fa931b58dae.jpg) + +![](images/24acb3bb969c56cf9068bc75a47163bb4b2fd5cfb83a6ae51eec791707338fa8.jpg) + +![](images/f93e5e0c608562728e8855f8158f8f8dfca8a94d4114bd194f042654bffe10c0.jpg) + +![](images/9e7f0f5aebdeb1b02b10a56ee4521570fb0755150225325cdd221a08f72aeae6.jpg) + +# Input Text: "A flower with a barn in the background, with reference to the attached images." + +![](images/aab60cd3ceb814eae695e39c17c9ede66cb9a44a4700601d9febe20289cdba35.jpg) +Figure 21: Task: Multi-concept customization. The goal is to generate images that effectively combine multiple visual concepts from reference images while aligning with a given textual description. Setup: Reference images are collected from prior works [103], and results are compared across GPT-4o, Gemini 2.0 Flash [99], and MS-Diffusion [103]. Each row includes the input reference images, text prompt, and the corresponding outputs. Observations: GPT-4o achieves competitive results in combining multiple visual concepts, showing strong fidelity to individual concepts and alignment with text prompts. However, its performance declines with unique or complex combinations. Despite this, GPT-4o outperforms Gemini 2.0 Flash and achieves results on par with SOTA methods. + +![](images/b7a34b10991c1b9ad9cd6f27bad980e361343453a99acd05c8354db9560287b5.jpg) + +![](images/ab685c4d565c19e94d0af9de2b15ce72d5e4d59e9fb5279055cc29a626f1a509.jpg) + +![](images/e4a1fb76e31c6e6095d35539c83419338ff04dfd9cc1cb7d1e9c18ca4ecabfab.jpg) + +![](images/de9e123e1e1147256977150c0054e8e9a10a47b47074aa699979adc065b6358c.jpg) + +# Input Text: "A backpack and a stuffed animal in the jungle, with reference to the attached images." 
+ +Input Image + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +MS-Diffusion + +![](images/d617caa5b4e6e92b1428018122e2e89713f4978830db0e21a7ee846b88ce3163.jpg) + +![](images/896b8444337aa0e1e16c6afd486c4b4f5296517582f2b9dbf6bc16114d53d2ab.jpg) + +![](images/8217fbf0fc579e87f7d27802daffe76c3b9b1cdc80d7434cf08598ef201ca65b.jpg) + +![](images/e3951f420a6bebec8d982ae86c6d63b89a8e9d80db44d4a5af3d22ff2a0dabf6.jpg) + +![](images/0a9ad99bf92f46485bb850e72e6f3103fbdbc9cd19afa549dae69229296ce436.jpg) + +![](images/1e13756c7b144754ea9efd5eae0b5683845af5444f13a90c830d2492dfbd9bbd.jpg) + +# Input Text: "A lantern, a clock, and a backpack on a cobblestone street, with reference to the attached images." + +Input Image + +Input Image + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +MS-Diffusion + +# 2.2.4 Story Image Generation + +Story image generation is the task of generating a coherent sequence of story images from an input text narrative. The inputs may also include the first story frame or character images. We choose Gemini 2.0 Flash [99], StoryDiffusion [38], SEED-Story [111], and DiffSensei [108] as baselines due to their proven ability to generate coherent and expressive story images and their public availability. The results are shown in Figure 22 and Figure 23. + +In the first example, GPT-4o and StoryDiffusion successfully generate a three-panel short story about a fisherman, whereas Gemini 2.0 Flash fails by producing a single panel that appears to combine the three story narratives. In the second example, the story narrative is longer, spanning 11 panels. To evaluate this scenario with GPT-4o, we instruct the model to generate story images sequentially, using the input image and all previously generated images along with the corresponding text prompts. As shown in the figure, GPT-4o is capable of generating a long story with consistency. In the final example, we examine a Japanese black-and-white manga style with multiple input character images. GPT-4o is able to generate coherent stories, though it exhibits minor errors in character consistency (notably with the depiction of the woman) and misalignment with the input narrative (the narrative requires 7 panels, but only 6 are generated). The baseline Gemini 2.0 Flash performs worse, failing to preserve character status and the correct number of panels, as it also produces only 6 panels. Conversely, the DiffSensei model demonstrates superior performance, likely due to its specialized design and training for Japanese black-and-white manga generation. + +In conclusion, while GPT-4o achieves comparable performance to current baselines in story image generation, it shows limitations in specific scenarios, such as Japanese black-and-white manga and precise character status preservation, when compared to methods specifically tailored for those tasks. + +# Story Image Generation + +# Evaluation: Subject Consistency. + +![](images/9f84d4d4f227df36ae2d8eb29101d274a1877c545a2468371a5088741bc3cd8f.jpg) +GPT-4o + +![](images/821b29da41b0b8024feaf0f1d198abc77e613c7dd6beb41244e3ee04340d25a7.jpg) +StoryDiffusion + +![](images/40ffe89df9c50ac3b8034fed7000e9df015d18b39c605d05fc2cb755261800bb.jpg) + +![](images/7c0c2c96243ce0e8cc18e9a4df91dbe8e7777df73105a2ceb18d177a514d9735.jpg) + +![](images/73cf810872486276d591d5946a9ef3dc5549af56b3140dbc1d46cd76cf11381f.jpg) +Gemini 2.0 Flash + +# Input Text: + +"Draw a story about: + +An old fisherman in a cable-knit sweater and boots + +1. Laying out a picnic solo +2. Rowing a boat at dawn +3. Stargazing with a telescope".
+ +![](images/9aa5032b866731f07d0e37da2dd1017f2e4122d3413aa5ce54be7e8880b33931.jpg) +Input Image + +![](images/9b0a74f7db3b0699ac94506c383fb22b1af1f2bd911d7fa4312ee9ca289612ef.jpg) + +![](images/ea5e48f5561ad6c4fe1193a647524607970cfe7cdc63cd079944cf82880cecfe.jpg) + +![](images/d68465b01fad9263615792827618d6bc3ea2ac4994279661c281ea4561a93647.jpg) + +![](images/48601f43a53bfc38e48cf7cca862cb1f50808dd99f5c4af851716147ddce8e55.jpg) + +![](images/c03c17cff30fad0446b3d264e031d30a0f68b4ac841df4e2880f2381e0399ca9.jpg) + +![](images/c8ad21ac9388fd5c3e7f05f8a3c2192c73560b7737abef6737af98782aedeab8.jpg) +GPT 40 + +![](images/85f94fe9cc62a3b120dd03edaa6ef6f69b02976c1603bf7df955e05359a6cb30.jpg) + +![](images/1c6fcddb57e3da5cf397a2753308ce3877a635582a5355e1b66875d5fb6b8636.jpg) + +![](images/0419b8779eb79040a7d1330971acc7342fec2dd90c7626958522a6fb5f0fcf8e.jpg) + +![](images/505a6db0b4602aea76566f436b70be7da6453677d5c873c5267b35cc43b24a51.jpg) + +![](images/ccf71f9041dc2d2f4458a2e0113d7a09f60467f15834ca798f39accb6bbf34e8.jpg) + +![](images/bcd70602d56a2f46a9663b8b18aa7be1f8cc70645914af87662cc424f8bd98e1.jpg) + +![](images/a1117bdf2682b8d7115f12b7af60c126adce2d44e56036f3f9677ed0f6fa8cda.jpg) +SEED-Story + +![](images/5daa456ff2e454b0f91f88203eed2a998e30a44e393325bd1cc55a49937d3939.jpg) + +![](images/073d5da23f5b7f75f55840c7e941a2744edbcb0a242ace828bcaf195e2c9978e.jpg) + +![](images/aa32d16435cd0e8550916c3f311f8776216cf8e0260b722c1418d0d7619083ed.jpg) + +![](images/a492c7a006b896e7436ce9df3705f82414a33538f91a0c7ba1f64667941a3dc1.jpg) + +![](images/0abc90b5e3bcbd0cb683654abfcc56966ce72e50c6e2cbbe3c61540b34a5b998.jpg) + +![](images/36ad8877a81602152fef02eb279de3452a2d3bcd29e3cca2508e6eb27da3dd21.jpg) + +![](images/e9288a0c02092fd53245823396ca1f76b2df5abca1ee697a871b5004f18dce58.jpg) + +![](images/aba75b8ad4c326892ca96755807604f259178d54af19a83236adab4115951198.jpg) + +![](images/b13f052399947f2c4da3a2882a7878f31fc79514f3fe8a809951a32ca1140118.jpg) + +# Input Text: + +"Draw a story about George, a monkey: +1. He looked around with a curious expression, wondering what adventures awaited him. +2. Suddenly, George heard a noise. ... +3. To his surprise, the noise was George's friend, a small brown dog ... +4. George and the dog then played a game of hide and seek. George hid behind a couch ... +5. The next day, George and the dog decided to explore the city ... +6. George stopped on the city sidewalk, looking up at the sky ... +7. George then noticed a building with a reflective glass ... +8. George and the dog stood in front of the building, looking up at the lit windows ... +9. They were in a room with a door, waiting for their friend to join them +10. Suddenly, the door opened, and a man in a yellow suit walked in ... +11. He seemed deep in thought, unaware of George and the dog watching him from below ...". + +Figure 22: Task: Story image generation. The goal is to generate coherent story sequences based on narrative text, optionally conditioned on initial story frames or character images. Setup: Each example combines an input narrative (and, when available, reference character images) with a series of generated story panels. We compare outputs from GPT-4o against Gemini 2.0 Flash [99], StoryDiffusion [38], and SEED-Story [111]. Observations: GPT-4o exhibits strong narrative coherence and panel continuity, matching or surpassing general baselines. + +# Story Image Generation + +# Evaluation: Subject Consistency. 
+ +![](images/45c315f9d4b26f49ce32764e260ffb24ab385bf338d6917e2d37d6b26b242016.jpg) + +![](images/f4ad4ea5d8987493e7a7c6a58f347e56802ce312612d8ea2a144cda053cdf994.jpg) + +![](images/084f24a984613e1c05f7c5e2fa9817167a1fbac88e5cd473c3386ae1bd4b9322.jpg) +Input Images + +![](images/536ebeb2091f4d9765dfff67b32c88a8b7e2f773ca113d0bf1fe677432f050ca.jpg) + +![](images/89edb9b677a275a49f6618e18a5f5744c28971461e4544202201e438f92139f3.jpg) + +![](images/186181b2b63acba5d62a3720407b17286c90fad94c34b8ea5cef23a65cfbf1ec.jpg) +GPT 40 + +![](images/28bf76b0adb60b3e53902f81e3d33090021fb2e1147bd03fb08fac858f7ac972.jpg) + +![](images/9bba0dc289d8ec29ff37642b0bc4d84feafbf8e7b361e6e17186d7b033c6b5be.jpg) + +![](images/8ef5824e83d6803a07b614317f4d908c06c18331051e08b8eef6e454c1be8073.jpg) +Gemini 2.0 Flash + +![](images/6043908cb4a51385380e40a2264f62f78e48ba2368469bbca5ad6f2dde9482cf.jpg) + +![](images/94c4c44bd6d4d882958c8eed91e70b3ca5d10dea1f870d92a7661befb223ea99.jpg) + +![](images/072676f5a96fdb59a2c5611f9262a740e1f39908971d09d9835789f46d395625.jpg) +DiffSensei +Figure 23: Task: Story image generation. The goal is to generate coherent story sequences based on narrative text, optionally conditioned on initial story frames or character images. Setup: Each example combines an input narrative (and, when available, reference character images) with a series of generated story panels. We compare outputs from GPT-4o against baselines including Gemini 2.0 Flash [99] and DiffSensei [108]. Observations: GPT-4o shows minor shortcomings in precise character consistency and panel count in specialized contexts, such as Japanese black-and-white manga, where dedicated models like DiffSensei deliver superior performance. + +# Input Text: + +"Please generate a black-and-white manga using the given characters (a young man, a child, and a woman). Each panel may appear 0-3 characters. + +1. A man is lying on the floor surrounded by books and papers, with a radio nearby. +2. A woman with curly hair is smiling. She's wearing a patterned shirt and apron. She's holding a baby. +3. A man with a surprised expression, his mouth open as if he's about to shout or scream. +4. A young man with a surprised expression, is holding a baby on his back. +5. A man is holding a baby. The man's hair is disheveled. +6. A man with a surprised expression. His eyes wide and eyebrows raised. +7. A man carrying a child on his back walk up a staircase. The man is wearing a stripped shirt". + +# 2.2.5 Low-level Vision + +Low-level vision tasks aim to enhance the basic quality or detail of visual content by improving various aspects of an image. Initial methods often focused on optimizing single tasks, such as super-resolution [88, 95], denoising [61, 63, 55], restoration [60, 20, 62, 84, 15, 16, 17], color adjustment [59], and more [22, 66, 116, 1, 122]. As the technology progressed, subsequent approaches expanded these techniques to handle multiple low-level tasks simultaneously, which is called universal image restoration. Low-level tasks play a critical role in image generation and editing, allowing visual generative models to provide higher-quality outputs in real-world applications. By enabling models to adapt to diverse inputs, they ensure that the generated images perform well across different visual tasks. This is especially important in areas such as image restoration and video enhancement, where high-precision visual content optimization is crucial, such as in film post-production and autonomous driving. 
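Because several of the comparisons below turn on whether a model preserves pixel-level content while removing a degradation, it is worth noting how such consistency can be quantified. The following is a minimal sketch using full-reference metrics; it assumes NumPy and scikit-image are available, and the file names are placeholders.

```python
# Hedged sketch: quantify restoration quality and input-output consistency
# with full-reference metrics (PSNR and SSIM).
import numpy as np
from skimage import io
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def consistency_metrics(reference_path: str, output_path: str):
    ref = io.imread(reference_path).astype(np.float64) / 255.0
    out = io.imread(output_path).astype(np.float64) / 255.0
    # Generative editors often return a different resolution; resize or crop
    # before comparing. Here we simply require matching shapes.
    assert ref.shape == out.shape, "resize the output to the reference size first"
    psnr = peak_signal_noise_ratio(ref, out, data_range=1.0)
    ssim = structural_similarity(ref, out, channel_axis=-1, data_range=1.0)
    return psnr, ssim

# Example (hypothetical files): compare a ground-truth clean image against a
# model's denoised output.
# psnr, ssim = consistency_metrics("clean.png", "gpt4o_denoised.png")
```

A specialized restorer would typically score high on both metrics against the ground truth, whereas an output that re-synthesizes the scene can look clean yet score poorly, which is the failure mode discussed next.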
+ +We evaluate the performance of GPT-4o on these challenging tasks. First, for several image restoration tasks, such as super-resolution, denoising, deraining, low-light enhancement, deblurring, and dehazing, we collect test images from previous relevant works and compare GPT-4o against Gemini 2.0 Flash [99] and a universal image restoration model, InstructIR [20], as shown in Figures 24, 25, 26, 27, 28, 29, 33, 34. In most scenarios, GPT-4o produces high-quality output images, outperforming Gemini 2.0 Flash. However, some degradations remain difficult to remove, as seen in the second image of the image denoising task. On the other hand, for low-level image restoration tasks, maintaining pixel consistency between the output and input images is crucial. GPT-4o does not perform well in this regard, as the content of many images changes. In contrast, InstructIR, designed specifically for image restoration, performs better, effectively removing degradation while maintaining pixel consistency throughout. + +For image inpainting and outpainting, shown in Figures 30 and 31, we compare GPT-4o and Gemini 2.0 Flash with recent inpainting and outpainting methods [66, 116, 22, 1]. Only the missing regions need to be completed, yet GPT-4o still alters content that should remain untouched; although its output image quality is higher, this is not ideal for the task itself. For human face inpainting, the overall artistic style of GPT-4o's results is more natural than that of the other two methods. For colorization, we choose a recent colorization model, CtrlColor [59]. As shown in Figure 32, the overall tone of GPT-4o's results is somewhat dark. Compared to Gemini 2.0 Flash, GPT-4o's colors are more natural and stylistically consistent. However, there are some inaccuracies in color control: for example, in the second image, the cat is not white as specified in the text. Additionally, GPT-4o still exhibits changes in image content, such as the shape of the person's face in the fourth image. + +For the image re-lighting task in Figure 35, GPT-4o performs well in applying realistic lighting and shadows, with natural color tones that match the scene. However, it occasionally struggles to maintain light consistency, particularly in complex lighting scenarios such as neon or vibrant lights. Compared to Gemini 2.0 Flash, GPT-4o produces more natural and consistent results, but it does not always accurately replicate the lighting effects, as seen in the second image, where the neon lighting could have been better captured. IC-Light [122] is effective in applying realistic lighting, but tends to lose detail in some complex objects or faces under different light conditions. Overall, GPT-4o is a strong contender for the image re-lighting task, providing good light consistency but leaving room for improvement in some specific scenarios. + +In summary, GPT-4o demonstrates strong performance in various low-level vision tasks, often surpassing Gemini 2.0 Flash in output quality with more natural and visually appealing results. However, it struggles with maintaining pixel consistency and avoiding undesired changes to image content, which are critical for tasks like restoration and inpainting. While its adaptability and realism are impressive, there is room for improvement in precision and task-specific consistency compared to specialized models like InstructIR and IC-Light. + +# Denoising + +![](images/1a4731938b542638df6ad94c956073db2b548c15e4b1b8114d5307a10c99ea73.jpg) + +# Evaluation: Image Quality.
+ +![](images/0a89af2a4699039f927a01b35f74a721a10ddc2eb096385ea8edf582d6c03d50.jpg) + +![](images/1beb76da0b2a004a33d76279d80831d9cbb3917b5daee0d2aa13d05feb493089.jpg) + +![](images/174139ab6dcc12cd6a99f039cfe71445d47612cb61b456c34e139a4d44ce5b27.jpg) + +![](images/25c6f8568e7bcdc44e6309534f95b1a0b59bb18d7773a69a2d061ee65e297860.jpg) + +![](images/25046cf08634831335f437686635f7b1affeb5cf872dfd9bdeef73ad97a4f786.jpg) + +![](images/0a2a55819ee33dc9ba77d52891f0222654f300378e0c315f91df125d5feab4ef.jpg) + +![](images/34f3e69973949e582903e9064b4ccc8b319f75b359178b7208421be704e55d45.jpg) + +![](images/46dec341162765728e1fc8f0cd002ebf4ee74f97cd953e898c4270f10f2a5328.jpg) + +![](images/47297a815dfe05f13941cb00e869c4a19a92d4533649e2a583bafd94faf893c9.jpg) + +![](images/9f7537d877ef9e59c4b7d2865b5b20522610acea27d5295f2b7a540a7f158aa1.jpg) + +![](images/4425fde2f8933a0d0a27e3c8934be18f51d1e042682ca256127948d03c7d8026.jpg) + +![](images/29f5e73b544db2cde87a7ae619bda23d4e11cc25cfb4269634f02ce791227353.jpg) + +![](images/6b407e152dd4b4cace6037d3643ed856c1e206632e4bbccf517c4fef9f21d5ff.jpg) +Figure 24: Task: image denoising, aiming to remove noise and obtain a high-quality, clean image. Setup: We compare GPT-4o with InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the denoised images. Observations: GPT-4o can restore high-quality denoised images: except for the second image, where the noise cannot be completely removed, the outputs are free from noise. However, for low-level tasks, GPT-4o does not maintain content consistency well: the background colors and object shapes in many images have changed, such as the background color in the first image and the floor in the fourth image. + +![](images/180b6d2fbe08d1106795eea1f9a5165620b7ff33122c84b0e0c53f5d47bd52a9.jpg) + +![](images/4e2bd472765d312f5f2fa59f23f64b308cadfa60ddeaef7c74ca0effcf81c1de.jpg) + +![](images/345956a47d1ba18bc47ef420be35f2680ee92364e251cc4f0c85890f41d35a52.jpg) + +![](images/ea1923c868229c788db11eb4c4c0a0c7fdfff3244b7867c71f36ecc71509d140.jpg) + +![](images/0d95f3d9522c913d7fb63c52805b36252551d9de67bab8edf492afd706660e7d.jpg) + +![](images/2865da09f43ccfb7bad4c1a38f720f8a7f22179ea35f92a1539146184df21cd6.jpg) + +![](images/273a3c5b1d1bd94335184aafbed95bd03cde1025a92f44aa700ed255ea3c88e5.jpg) + +Input Text: "Remove the noise, make the image clear." + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +InstructIR + +# Deraining + +# Evaluation: Image Quality.
+ +![](images/c33450ac171eaded049f91066f0deeba4af786415bc6dd5d653dc55c54b3ee41.jpg) + +![](images/e15b31d827df49078bd8ff4b5a3c0b7e9766b7e0ce33322d4b3e8fa24173ca40.jpg) + +![](images/4bd0fc1b08f65d2b90caeb19644fda48d0c14d56f7a3b6d5b0258145a86adade.jpg) + +![](images/dccfcb457f6ecc58a1354c97e2786be9af3ddba306e9c14119bc34b83281433c.jpg) + +![](images/e7da05826e2092da3f8c786eaba5c6a6330ab3558f6720c639f754e67843897b.jpg) + +![](images/bb6ced5f0c7f3239e75cdaf62739d7bd88f4e2dea5220e63e06fbbda019b0215.jpg) + +![](images/a0be173027e50abb4eeaa3a09379cf5495e4baeb4d07b8cfe3c3fbfc7f49da66.jpg) + +![](images/902fc2f31952ca02a07900b777fb797fe5708419dadaf611ebdbe7c0305895cd.jpg) + +![](images/53b1fed05e6b1e6eefadf734ccb916b0779e1b0b4d17f572e0bed7be77130d6c.jpg) + +![](images/45630d882ee83bb0a62e1013d838d630afca2a25e99127c4ab251b2d79d6652d.jpg) + +![](images/7e92a2b49a69f9d2edbf84340e1fa8c4dbc2cf0e60655a02c6d49818f70c5d3d.jpg) + +![](images/a8bf6e323c48a02f642dd0e8b8ab2bf45779e5f873173853590a980210b6b42b.jpg) + +![](images/536e0b422f449656c91454149f3446fde812ec0f995fcf5bbe9e0a43e2d597fa.jpg) +Input Text: "Remove the rain, make the image clear." + +![](images/d900d2a2f4a7e03bc322a876b3c802692357c1fec98fb4a0bdfe6de558f8f44a.jpg) +Figure 25: Task: image deraining, aiming to remove rain streaks and obtain a high-quality, clear image. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the derained images. Observations: The overall performance of GPT-4o is good. However, the model struggles with maintaining content consistency in low-level visual details: for instance, the polar bear's background in the first image becomes unnaturally pink, and the underwater scene loses depth and clarity. The flowers also appear altered in color and arrangement. In contrast, InstructIR demonstrates the most consistent performance across all examples, effectively removing rain while preserving the original scene's structure, color, and composition. Overall, InstructIR is the most balanced and accurate model for image restoration in this comparison. + +![](images/4f7cd151bdab47a69f14cb0fefb2b225b07676d7de97906f3f09e608ae362d1f.jpg) + +![](images/a1cf0fd8a07f055d804221a00dac3ab6196ba65037b7980ae7d6ac1150ef8152.jpg) + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +InstructIR + +# Dehazing + +# Evaluation: Image Quality. + +![](images/0b161ea7ba45146ed5301977dbc4153cddf37d20e4042c25caf795a6761d7b64.jpg) + +![](images/d9fef0de375f6e8e87b378aa29d3ee9d9b91df21b84ed0eb8eaf780d387eb2f7.jpg) + +![](images/2f53f480f09632c6f3fce2f380303d7ddfff3462b4f02b3fbd217d4b98cd7bfb.jpg) + +![](images/1dcf8e8781f2caee3cccfcec05a958f963d7001ad1378b322b20be96f757857e.jpg) + +![](images/192fef7db091bb823a304562be65f9026029dbec8cd7557e21fc725cf7a16820.jpg) +Figure 26: Task: image dehazing, aiming to remove haze and obtain a high-quality, clear image. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the dehazed images. Observations: GPT-4o performs moderately well in dehazing, managing to restore clearer structures and contrast in most scenes. However, its outputs often have a grayish or desaturated tone, especially visible in the second and third rows. Gemini 2.0 Flash produces more colorful results but tends to leave some haze behind, leading to a less crisp output. InstructIR outperforms both, offering the most visually natural and sharp dehazing across all examples while preserving original colors and details.
Overall, InstructIR demonstrates the strongest capability in removing haze while maintaining realism. + +![](images/fa73cd36deac40a3a5de512bdd640d92cbdd918be0feb815b9659870541d646f.jpg) + +![](images/1f7f8dc7c95580fa7127ddf404e77ee13666d31a7cb6fd3eb7e66943921a35a7.jpg) + +![](images/2b91fde29b7d65bb3965853dad07b3b346f976065009ddf3ee693efe1429b9de.jpg) + +![](images/6487fa3903724c091ac75d960053975fd920521719237fbe538abf890e5b7d14.jpg) + +![](images/b57c9edcd60d270e665488237cb0979f4c4725a3a0c7e8682695c4bab96be517.jpg) + +![](images/68285fa202a5fdaa42f6a5b5c9a5ad11300451918903881288401c4c6d69b8ef.jpg) + +![](images/7dd57547ecd2c2e9b99488cd75ff2d9c6688588f611f0c7b10caea435b7cf9dd.jpg) + +Input Text: "I took this photo during a foggy day. Can you improve it?" + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +InstructIR + +# Low-light Enhancement + +# Evaluation: Consistency. + +![](images/a84122365c616f0e518763e95e8748954778d1d6f37580aebe6d9fcdb6dbdefd.jpg) + +![](images/a5bc8e52d7842f4252e539ba34adcea369c6d11bb3eaa72ea23966ff1c26f6e2.jpg) + +![](images/c090907878b87d590ef3ced70007db5dbbe66202ce9b1e00122e2b8df532d6b6.jpg) + +![](images/995ae0408307d51c1fe4b22c7386e33f12f5f2de57aab5e0c882854a62a49b4a.jpg) + +![](images/d9be1ffaecc96d29076914277e7c30d3db8c4735a052211f5df25b329509f3bf.jpg) +Figure 27: Task: low-light image enhancement, aiming to increase the brightness of a dark image and recover a well-lit result. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the brightened images. Observations: In low-light enhancement tasks, GPT-4o can brighten images and recover basic visibility, but often introduces unnatural lighting and loses detail, especially in the second row, where the image remains overly dark. InstructIR consistently delivers the most balanced results, enhancing visibility while preserving true colors and textures, making it the best performer across all three examples. + +![](images/7c783389b6755b3395ca901bd7e2c2f77ed95615d85ca58ae5a381dd0a4cbd5e.jpg) + +![](images/1a5b4b1774fdc23750f023c71c85c56ea578acf01ebde2ea68333a166999bf31.jpg) + +![](images/59b7eab0c07c081369aaeb8fc99ee1be38d0f888691576752368cada60c2dfd5.jpg) + +![](images/bdfaed21c06e793694872cf312fb039cb068ab6d61c502ad781bc11ec2d4c06a.jpg) + +![](images/7ea976f449a8043690de06844c2b94aafce18ca58e6beea0e7e26544e3ab2899.jpg) + +![](images/c00e98505d0ec6544c9ab3f147c4ec2eb644ae9be7a7e5bf1a8d123c564b6686.jpg) + +![](images/2043ba7207ad0d9c159f8b1abd4cb8a7575070b2f4ddccfd301508c5cbc7ce5a.jpg) + +Input Text: "My image is too dark, I cannot see anything. Can you fix it?" + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +InstructIR + +# Deblurring + +# Evaluation: Image Quality.
+ +![](images/85f4dbb3e1869fc684c51a8d3da40afdad3cedebd548adaef1a8346cf9e23270.jpg) + +![](images/6b9df00e9d9d77d03f6e7d0327a679c41ab300965c8160ed8995d13466569736.jpg) + +![](images/37feb29d085233a01159a45ae5e03d6b57ff8f11c715ca55b4a77dfee3fc645b.jpg) + +![](images/5d41134c1d56df0d9d4e13222e77a77b5332513dbf0e28ef10e50470295d17e3.jpg) + +![](images/98924e97a312fd055c311dfce4f2f33c194a08959c53493dda6596e9bbacc31e.jpg) + +![](images/5930d203da41b02946e5b564261b3a200c305a6a549a4ba7c3961c80697c16ce.jpg) + +![](images/c52ea48115f508872bd5b5ec87bc06b52622f4b6d3cea22c4f58d26b4d869481.jpg) + +![](images/3270cef07702d259baaf487526f2ae6da683fab7cc7fbfd29b04b26044a58e69.jpg) + +![](images/d25e5d34348f2c9916b8707da5ebf28418590c98d607597c3205410432cd39ab.jpg) +Input Text: "I took this photo while I was running, can you stabilize the image? it is too blurry." + +![](images/10e10b4b43504d485d1b89173a358ad873c0bb08494a06dc780f0f725b125906.jpg) +Figure 28: Task: image deblurring, aiming to remove blur and obtain a clear image. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the deblurred images. Observations: For motion deblurring, GPT-4o recovers some sharpness, especially in fine details like text or faces, but the content does not match the original image. Gemini 2.0 Flash sharpens the image slightly better in some cases but can introduce over-smoothing, making the result look artificial. InstructIR demonstrates the best deblurring performance overall, restoring clear edges, facial features, and text while maintaining natural textures. It consistently produces the most stable and visually convincing results across all examples. + +![](images/324c435155e439e0e4927509fb4bc07e46d67b68706e04a95c81c2c43616dade.jpg) + +![](images/0d5d4b908f4ff3dbdff9941212fc68ab04e87f2ef75d6228897f56880dd27fc6.jpg) + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +InstructIR + +# Super-Resolution + +# Evaluation: Image Quality. + +![](images/4f5025e7e13970ffe6ae4d344132af9a438277305e7345db9006ce618d0573fb.jpg) + +![](images/16dc34668ed85bc66c78fa13a9e59e97bd1551b7dcf1f770849b1c6ec45313ab.jpg) + +![](images/536066035c978487014a28aff6357f8949e5085087a1135fefd97bd330b10ad5.jpg) + +![](images/9bfe1fd24e221db430a1ef2c97a6b127a44b5d7b0c9d1975f8c8209268003e53.jpg) + +![](images/787c6e3871eb657b7ad2619b07c940f4253e90dda0a171100d9c16bb8ce5179e.jpg) +Figure 29: Task: image super-resolution, aiming to improve the image resolution. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the super-resolved images. Observations: In super-resolution, InstructIR delivers the most natural and detailed results across all examples, restoring fine edges in the card reader, realistic texture on the octopus, and sharp trees in the landscape. GPT-4o enhances clarity but misses details like the octopus surface and tree leaves. Gemini 2.0 Flash produces sharper outputs than GPT-4o but introduces unnatural textures and artifacts, especially in organic regions like the octopus and foliage.
+ +![](images/89139fed496336187890615f822656f053268a0928b6b8aad1304fa164575873.jpg) + +![](images/c817252ab152b2bdf4e05c1e86107c2ae73fac4e18e54c85852a65c86d6eb4ff.jpg) + +![](images/deeb960aa331c541bdfd82bbc98e2d7b1232c00088999d254c3e72f9a70ad184.jpg) + +![](images/4b7ed5536998e9b32588208e95ba79958f7565c5f1a32016788821d35ecd191a.jpg) + +![](images/f91d6cbc924b18c4e72f2efd1f2c0ffe7a85d740967c0f696474bad835204b3e.jpg) + +![](images/23c403764728be45b17ee5db54c9ec0e5a6b239c1402350ba72598d480c17fbc.jpg) + +![](images/7c6e1db18970fe110e86871683e72f4fceaeb6a8c3e0c16fe2dfb612916f8a04.jpg) + +Input Text: "Make my photo bigger and better. Add details to this image. Increase the resolution of this photo." + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +InstructIR + +# Inpainting + +# Evaluation: Image Quality. + +![](images/c9d161d0a7f9636e0f15f7cf8b38ef3cde507b88822b80d1c054fc17965684c8.jpg) + +![](images/5d0d22f1be19f77f4671c9695622b0c8c720ea17567c646c8545d5c47f0261dd.jpg) + +![](images/855b2a4ebf86a04b3d8e45ed6cc6bfe5358ad56960cbb002301c9715b3a3062e.jpg) + +![](images/2a2ba40c70067c2bbfe87ba3140fb913b131f18ca0f6a0801c01555583398b16.jpg) + +Input Text: "Please inpainting the image, make it looks reasonable." + +![](images/327aff3d754879bc07e69c135ac470cd76c2e5d5328c263cd495368496462207.jpg) + +![](images/fbb3f36e987742956eb393471e0d4822531b76d7cb4cd749008a8301e0d7abaf.jpg) + +![](images/8da79e08847cb47bf66249a4079de84e4d65ff7744d90920fa8a900bbdbaed53.jpg) + +![](images/d238501a0cfe7a41a5b2875bb5e131d9ad25dd839d448e2d25bc5ab1fcd60ac6.jpg) + +Input Text: "Please inpainting the image, make it looks reasonable." + +![](images/0f8a4c52b6db3fbfb6fe2b7d165d3273b2849687f1ef570d6810290c516025db.jpg) + +![](images/2f732805f2b0034e45b6e06d068adc0a84b73d56f50fb84d71c1d98da5382f36.jpg) + +![](images/92d34852cfd83335f95aa56035188712f19e68bf3f090085cf6440a634a2b66a.jpg) + +![](images/82a5f5ec145a93551e9693a7e4e48cedd5a986546f72da6f448a7c2c57ab92ee.jpg) + +Input Text: "Inpaint the missing part of the face in the image, making the restored area look natural and seamless." + +Input Image + +GPT40 + +Gemini 2.0 Flash + +LatentPaint + +Figure 30: Task: Image inpainting, aiming to restore missing or masked regions in an image to appear natural and consistent with the context. Setup: We compare GPT-4o with baselines such as Gemini 2.0 Flash [99] and LatentPaint [22], evaluating their ability to fill in masked regions realistically. Observations: GPT-4o produces plausible completions but often lacks fine structure and texture alignment—e.g., the bricks in the first row appear flat and misaligned. Gemini 2.0 Flash generates more visually coherent textures, especially in natural scenes like the second row, but can introduce slight over-smoothing. LatentPaint performs the best, accurately reconstructing facial details and complex textures such as hair and expression in the third row, demonstrating superior semantic understanding and visual consistency. + +# Outpainting + +![](images/842316d408aba300ecb8a4b27bfcbba0807e6bf4b66c38430aceb0e69a3016e2.jpg) + +# Evaluation: Image Quality. 
+ +![](images/ecc01e85a79c76a542d72a3c3811916efd3af7089be84ce35914d93646b60cbd.jpg) + +![](images/63bf7481f04803305e25b194d0f6a798e29e4ad8347b13488f22af38472f7dbb.jpg) + +![](images/16e0aacca78d0ddc7841c44b3666005097467f6eb11e4ce97e8f4b2ead39c10b.jpg) + +![](images/e739ed6e7d2e595e03926be7ac6c7b1fb3bd3badc49f246dca21b01655f93b1c.jpg) + +Input Text: "Inpainting this image: a classic dark brown leather Chesterfield loveseat with tufted detailing and rolled arms. It sits in a cozy, traditionally styled living room with green walls, framed artwork, and warm lighting, creating an elegant and vintage atmosphere." + +![](images/62b37976bf23d0f522572b1d32ef2041be79cd68b5ed5fc0163a0fe71955417f.jpg) + +![](images/96f8443e9e1d95f4d7b78542413a7daa8e652b264b9a0a1fb5f13e9ccdf11c2d.jpg) + +![](images/dd6454a272ee52c994805637b87e75f5966a46cdbd76435fa0cf210c205b0168.jpg) + +![](images/2d141aa5b85e10194f8145486f1551abb222659e93a01d192127766df99e63e9.jpg) + +Input Text: "Extend the image to the left and right with a realistic continuation of the street, sidewalk, and background buildings. Maintain consistent lighting, shadows, and overall style." + +![](images/d6cb4c9f05457cd35c0effca1f6abd864d75f43df91491467710d0bc3721a83c.jpg) +Figure 31: Task: Image outpainting, aiming to extend the visual content of an image beyond its original boundaries coherently and realistically. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and specialized outpainting methods (SGT+ [116], StrDiffusion [66], and Dream360 [1]), evaluating their ability to extend content while maintaining visual consistency in lighting, texture, and semantics. Observations: The specialized outpainting methods consistently produce the most coherent extensions — for example, they accurately maintain the room's lighting and decor in the first row, continue architectural lines and street perspective in the second, and create seamless snowy landscapes in the third. GPT-4o offers plausible structure but often lacks fine detail and texture continuity, such as mismatched snow gradients or missing shadows. Gemini 2.0 Flash performs slightly better in semantic extension than GPT-4o but can introduce lighting inconsistencies and abrupt transitions, particularly in wide scenes like the desert in the final row. + +![](images/88aa98d1328c5f92799e7830df3cc592e78892873c7d450de34f064e06cadf11.jpg) + +![](images/dd2dae5038367f85e716992705228d85cfed4fff5609bcaadfd9baa3415e36b6.jpg) + +![](images/a25788560611502b56750b8036243b4afafc5fde1438409b033768440c793002.jpg) + +Input Text: "Extend the image to the left and right, filling the black areas with a natural continuation of the snowy mountain landscape, ski path, trees, and sky. Keep the lighting, shadows, and textures consistent with the original image." + +![](images/999497601ef88fdd1c39e3f4a28faa7e03a78e030514867e7db3e4a3be244eff.jpg) + +![](images/5fd37721ff6283442746e2e6bca6d7639893b65c0c252601a4e8feeaec442651.jpg) + +![](images/899da298140f306785ebb56f86b92a0e2dfa83b94136c6477ed20cbea1efa3fe.jpg) + +![](images/80963c1dbf62d49450d0bd1cbdf60c75eaa325a5af95ee28f199c5cd0699b838.jpg) + +Input Text: "Outpaint the center of this panoramic image to naturally connect the left and right desert landscape. Fill the middle area with a realistic continuation of the rocky desert terrain and blue sky with clouds, ensuring seamless blending and consistent perspective." + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +Dream360 + +# Colorization + +# Evaluation: Image Quality.
+ +![](images/a545885da447cbcefa593efa1163db12a6ecaa2cdbbf6e96a524f3a907d3724c.jpg) + +![](images/b33e1d9b8d191b6badbe6994b410d984583d93bb0eec0eb614f21170a2cdb7d5.jpg) + +![](images/b0d254da72b338ef668b468732303651a5ecbf992ea9575f30ca06355f2c2edc.jpg) + +![](images/0bad4b8a9e257c2529bf84609f3ee26b5086573d0af51df270d4d92513151ff4.jpg) + +Input Text: "Colorize it: a red car parked on a cobblestone street." + +![](images/3520766d4bd31fe95534ccef2679e0d3237151e64a06cfbdbcc03285013be219.jpg) + +![](images/5daddf44b2febe0e6103c50d850ffed87ab4624a8da3303ae0a44eb83321b1ed.jpg) + +![](images/82452cfd1770b08e14ee6fdce267d83915ea906cdd31384da9f70cf3e1cf38c3.jpg) + +![](images/001d30c3201ae2f4664bf8ee499b66e8df811fc307c2c93884ed70982fa958ca.jpg) + +Input Text: "Colorize it: a couple of white and black kittens that are sitting in the purple grass." + +![](images/e0ea300e9865c0a0b5c3f4003da7770db1b8cd75b5f648d2fb6667866807f1bb.jpg) + +![](images/2aea31980297b8bd9effa16d06efa9dd2a95f1e6a7b7a99dc48e7bb6e4ed6a27.jpg) + +![](images/cc99e1fe7082b4516a42bfa326f45cb5f3fae6ae0f81c321344103d47d80cbb9.jpg) + +![](images/8f533a46531ffe4ae8d62f47eb7fc7615dbfab620a667fb4d1593863f2cb286f.jpg) + +Input Text: "Colorize it: a red sports car parked on the side of a street." + +![](images/d10a34e202b94e2de1a4f2d1cd215f5be631f1577e3a8ebeac90787c1852e8e9.jpg) +Figure 32: Task: Image colorization, aiming to add realistic and semantically consistent color to grayscale images based on textual prompts. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and CtrlColor [59], focusing on their ability to follow instructions and produce visually natural colorized outputs. Observations: CtrlColor performs the best overall, generating vivid and accurate colors that precisely match the prompts—such as green lips and yellow sunglasses in the last row, or the purple grass and kitten hues in the second. GPT-4o provides reasonably faithful colorization but often lacks richness or misinterprets tones (e.g., slightly dull red in the third row or inconsistent purple grass). Gemini 2.0 Flash is more vivid than GPT-4o but tends to oversaturate or produce stylized effects, especially on human features. + +![](images/ab3420504662b7f8244c018e079e1cffd82ab918262a3443ff4a8a16e52e66fe.jpg) + +![](images/45547d6bbb7a37e719ea603008ab1f1389806043fdae0a8ed425ac866ed4b27c.jpg) + +![](images/cec51b759ae2fd86f4a6cd9245c40a9af755b055698dd1da10fcbcf1565849ae.jpg) + +Input Text: "Colorize it: a woman wearing a yellow sunglasses with green lips" + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +CtrlColor + +# Shadow Removal + +![](images/cfa4811e58b535cdfd35ac61baf40239638c41912743eed71869ca83aded9682.jpg) + +# Evaluation: Image Quality. + +![](images/e533493affd9962f40a067f07476309f50b38f805e9b7e4cb111bd1d65a85975.jpg) + +![](images/51cc0442254dbc7d3906e354e679e7c97663cf26955df12ad376ac2c249891a4.jpg) + +![](images/038bd8bcbb952c2d249e6b969e11edf6c615ea7d80b89e647965ce83d7b12973.jpg) + +![](images/5d3c5fa7d9c2e13ae59f463d54f3c900277b032d69c97c5dd725781b8fc54200.jpg) + +![](images/8280c76d3783095ac597ffe6112e8e97d4768a43158ffbeaed59287bec81116b.jpg) +Figure 33: Task: Shadow removal, aiming to eliminate harsh shadows while preserving the integrity of the scene, textures, and lighting balance. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and ShadowRefiner [25] to evaluate how well each method removes shadows and retains original object fidelity and lighting consistency. 
Observations: ShadowRefiner consistently achieves the most natural and effective shadow removal. It produces even, diffuse lighting across all scenes—e.g., softening shadows without distorting textures in complex scenes like the miniatures and dog portrait. Gemini 2.0 Flash removes shadows reasonably but occasionally leaves faint traces or flattens contrast, as seen in the second and fourth rows. GPT-4o shows stronger shadow reduction than Gemini 2.0 Flash but sometimes alters surface brightness or loses detail fidelity. ShadowRefiner best preserves the original color tones and textures while eliminating harsh shadows. + +![](images/abb6c191cd8f3dbc58255f85b9eca1ff0ecbe31840d418b03345458fcd36ab20.jpg) + +![](images/898c8f34d0da327db20aba6653163e933c3b881344fb81df6b0f416dff0a65df.jpg) + +![](images/1bef728ebb6bc7f9c952867864380f4412a3ff879334ec345c1602238ad09c54.jpg) + +![](images/51b49e12056077fe1afb554b56beba3c707955dbdefb32d0bdf28a7d9683ab6a.jpg) + +![](images/e53143ad29978bfb50a47d639d116221922ece9fa13749ad760e4f2ca7cbecbf.jpg) + +![](images/aa0187c33d100a82b28f2505575ea9e419bbb4b364bc8feea7bf801423e56d3a.jpg) + +![](images/e391d4c6d98af6fcc61c7f1d8e60d08040956f96e2c5d61ef9f3aac4d730f7a0.jpg) + +![](images/c159dda7b72f6548e3a35745859137667be43c3d3382e30061df21040584ebd3.jpg) + +![](images/3fda08a0f5b089580c34cfd81ff50f99e0d4660564c4d9f7d8ce3de3d5c373c0.jpg) + +![](images/af45afc3aa4a8340ccb4e22deeda2a924e0315faecedd77d56e667ae464a5ec2.jpg) + +![](images/f7a549c39bdf0880255cebc34e0049aef8e949afe607ba2d7ca38743e5ee21e8.jpg) + +Input Text: "Remove all harsh shadows from the image. Make the lighting even and soft across the entire scene. Preserve all objects, colors, and details exactly as they are. Make it look like it was taken under diffuse studio lighting." + +Input Image + +GPT40 + +Gemini 2.0 Flash + +ShadowRefiner + +# Reflection Removal + +![](images/a92950a6ffd6c5949ed67dbcd8ef27f7a4455d1b77975ddb550332fdae784d04.jpg) + +# Evaluation: Image Quality. + +![](images/26dcc1fa3b38be01934c8e24ea493d1a80141258eb76a34f67d30c186588e554.jpg) + +![](images/8f1ec7ef5d643c0bfda5c6dcee55e55fbae700384aa8d8ff28e6f695f19abd73.jpg) + +![](images/e8c0506441fcc7c04d242543b53f3d7af01afae300d6edb7c5571fbe34319a8c.jpg) + +![](images/fce6882ef70a409cc3043ec9739951fd8feb6eec25edb8d9c56a80dda6894b5f.jpg) + +Input Text: "Remove window reflections, preserve interior details clearly visible through the glass, maintain natural lighting and perspective, photo-realistic result." + +![](images/a39507e46b3ffd1bbdf0ff1d20f603e166fb2c83a3cb4b81c304a49bc56d055e.jpg) + +![](images/d780d806748e7a2e2d245f4bb75612e3a332fab55a1cc6b0fa0ad203f17dd8b5.jpg) + +![](images/293cc3e02dece56450be7a6542b0151c4dcffe5012347d53906f237826b4a55e.jpg) + +![](images/a3941e54ef0c8ca433e936496f30948b94b4e9f8ef2115b981275376d66cf6b7.jpg) + +Input Text: "Remove the reflection of buildings on the wet ground surface, make it look like a clean and dry textured concrete floor, realistic lighting and natural color tones." + +![](images/2997ae5bbb03c3903ad7d778e996f50f9bff8f0a1ab2e783b344083f30f3cce3.jpg) +Figure 34: Task: Reflection removal, aiming to eliminate unwanted reflections from transparent or reflective surfaces while preserving original content and realistic lighting. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and DSIT [39], assessing their ability to remove reflections while maintaining scene realism, texture fidelity, and lighting consistency. 
Observations: DSIT shows the most effective and natural reflection removal across all examples. It restores interior visibility through windows (e.g., bed and car interior) while preserving lighting and geometry. Gemini 2.0 Flash removes some reflections but often leaves faded traces or dulls textures, especially on glass doors and wet pavement. GPT-4o performs better than Gemini 2.0 Flash in preserving background details but sometimes alters color tones and sharpness. Overall, DSIT provides the cleanest and most photorealistic results, especially for transparent surfaces like glass and reflective wet ground. + +![](images/145f3c5298c9786c810dc961b48e4e79d1a401610fd467498400ef8e5de81c24.jpg) + +![](images/517e9f2a30d20e6e67d1e7cc8d3275c188d4ba9fe1e3f526c5ea127a17812855.jpg) + +![](images/5d871e96ad190ac8245ba7e97f4356f76317189e597d17530c30c48b2ef31192.jpg) + +Input Text: "Remove reflections from the glass doors, make the interior clearly visible with natural lighting and sharp details, keep the golden door frame realistic and intact." + +![](images/4d9a72541da86687fce106cdee64c185f7126d2f2e035a9623361561c2006136.jpg) + +![](images/06b68c2bd1d1e1e567e71454157f92909dcb91789d0b950ba4f61dd09198910d.jpg) + +![](images/1f17bc39103c2c1bcbdf239933f68441984fad34a4c78c96991f6e9a246af36a.jpg) + +![](images/6bf819c8a7dad12a034e9854583b927e93387ef6c95c36f44d5e4a7ebbd9eef8.jpg) + +Input Text: "Remove reflections from the car window, make the interior of the vehicle clearly visible, preserve natural lighting and realistic textures, keep the car frame untouched." + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +DSIT + +# Image Re-lighting + +![](images/1a9a2fe4848dccc2cbdc43a21645b7d9153e4a783d4634d91cd4b7240855ca79.jpg) + +# Evaluation: Light consistency. + +![](images/303aadcf1f413c53fe51a10f2bbdb1d2f38eaebbbf1bafbaf65caa46e66c8863.jpg) + +![](images/73a85fa3c615d1faa5d51ccafc720c7d1d72730add58f7b85b6f81378602781d.jpg) + +![](images/89185a1d1d4373656d69efacb10271368adb3f6ab4aa29bb7ce10ccd11ba70a6.jpg) + +![](images/33aeab41543cb1f610d264106b611a2050ccfaaf21583d7c7034d3b1ef94d142.jpg) + +![](images/1186cfc667444853a5ea105221e7465b842983362ddd848dab0f81318dea818f.jpg) + +Input Text: "Given two input images: + +Image 1: A classical marble statue in neutral lighting. + +Image 2: A city street at night illuminated by neon pink and blue lights. + +Please generate a relit version of the statue from Image 1, as if it were lit by the lighting conditions of Image 2. + +The result should preserve the details and pose of the statue but apply realistic colored lighting and shadows consistent with the vibrant, mixed neon lighting of the second image." + +Light Map + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +IC-Light + +# Text-Prompt Image Re-lighting + +![](images/dfe5bd0363834d7a517a03c99514ab82f92032ef90b648cf55ef38ce418a6054.jpg) + +![](images/7a43c02aec8d995221ba9a8b29fbfb292e2b680c4330dfae1451b1cbd1e28f15.jpg) + +![](images/8aee6376931ccae47da7774dcc80f87cb4655a39088d2495592ef2b55cc6e8ce.jpg) + +![](images/6b14e73a8dcd42c4119885d683c4b57a1f2394b2ba3d7cfed0c412d829af1eb7.jpg) + +Input Text: "Sunlight through the blinds, near window blinds with a reasonable background." + +![](images/9353609b07179915947a49bbe7cb97c69d989d42af40bfbeb65de7a2a7c68425.jpg) +Figure 35: Task: Image relighting, aiming to modify the lighting of a given image based on either a reference light map or a textual description, while preserving identity, texture, and spatial consistency.
Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and IC-Light [122] on two subtasks: reference-based and text-based relighting. Evaluations focus on lighting realism, directionality, shadow accuracy, and semantic preservation. Observations: IC-Light achieves the most realistic and consistent relighting across both tasks—accurately applying neon lighting from a reference image and generating sharp shadows and natural light from text prompts. Gemini 2.0 Flash preserves content well but produces softer, less directional lighting. GPT-4o offers more vivid lighting than Gemini 2.0 Flash but sometimes lacks shadow accuracy or background coherence. + +![](images/81995073d133c3b619b48c6e202b90c68116f067eb1b41f3fb9d692f8ddaef05.jpg) + +![](images/0bcf87aef23dd6d3d1cfe80c898e5921b661e040732d6eeef59708f811da4ad7.jpg) + +![](images/1d6da19c3cbec9b2d439612a2a8553113f28a4397139ddfe38d9848ae7430cc6.jpg) + +Input Text: "Sunlight from the left side, beach with a reasonable background." + +![](images/39860355d35a8389c1208ff488e0389da02ec58d4919778ab96d9ee07712b2fd.jpg) + +![](images/b5b70fc8304a56c9d8bae3967435e4b106ab643b441b2630a6c6e823636ba844.jpg) + +![](images/2f4b6a584e5347f2a2ffa4c55015f094171d881e87ced85fc577e90c71f0f4a9.jpg) + +![](images/9e208291de9a6b6fb7d4acda0172d03e668194c7eb8b35eb83dd89d00607f9ab.jpg) + +Input Text: "Sunlight from the left side, beach with a reasonable background." + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +IC-Light + +# 2.2.6 Spatial Control + +Spatial control aims to generate visual outputs that not only reflect the content described in the prompt, but also precisely adhere to additional structural conditions (e.g., canny edge maps, depth maps, sketches, poses, and masks). This task evaluates a model's ability to faithfully align text guidance with visual constraints—an essential capability for real-world creative applications such as illustration, animation, digital content creation, and visual storytelling. + +In this section, we examine GPT-4o's performance across five representative types of controllable conditions: canny, depth, sketch, pose, and mask. For each setting, we compare its outputs with those from Gemini 2.0 Flash [99] and a strong baseline method using ControlNet-based [121] diffusion backbones (FLUX.1-Dev [51], SDXL1.0 [82], SD3 Medium [27] or SD1.5 [90]). The results are illustrated in Figures 36, 37, 38, 39, 40. + +Overall, GPT-4o achieves performance that is on par with ControlNet-based methods in many cases, especially under common or moderately complex conditions. In particular, GPT-4o is capable of handling semantically rich or contextually complex prompts, where its strong foundation model understanding can help preserve both high-level semantics and visual plausibility. This is especially evident in tasks like pose-to-image or mask-to-image, where the structural signal may be sparse or ambiguous. However, GPT-4o's strong generative prior can sometimes lead to overly detailed or hallucinated elements, which compromises structural fidelity. For instance, in canny-to-image or depth-to-image tasks that require fine-grained geometric alignment, GPT-4o may deviate from the input layout more noticeably than traditional diffusion-based methods. In contrast, ControlNet exhibits more stable and accurate control in these low-level structure-guided scenarios, making it better suited for applications where spatial accuracy is critical. 
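To make the ControlNet-based baselines concrete, the following minimal sketch shows how a canny-conditioned generation of the kind evaluated in this section is typically run. It assumes the Hugging Face diffusers library; the checkpoint names are illustrative stand-ins (a Stable Diffusion 1.5 backbone rather than the exact FLUX.1-Dev, SDXL1.0, SD3 Medium, or SD1.5 configurations compared here).

```python
# A minimal sketch of a canny-conditioned ControlNet baseline.
# Assumes the Hugging Face diffusers library; the checkpoint names are
# illustrative stand-ins, not the exact models used in this comparison.
import cv2
import numpy as np
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# 1) Extract the canny edge map that serves as the spatial condition.
source = np.array(Image.open("input.png").convert("RGB"))
gray = cv2.cvtColor(source, cv2.COLOR_RGB2GRAY)
edges = cv2.Canny(gray, 100, 200)
canny_image = Image.fromarray(np.stack([edges] * 3, axis=-1))

# 2) Attach a canny-trained ControlNet to a diffusion backbone.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet
)

# 3) Generate: the prompt supplies semantics; the edge map constrains layout.
result = pipe(
    prompt="a cigarette with purple tobacco",  # prompt from Figure 36
    image=canny_image,
    num_inference_steps=30,
).images[0]
result.save("output.png")
```

The division of labor in this sketch is the point of the comparison: the prompt carries semantics while the edge map pins down layout, which is why these baselines hold geometry more reliably than GPT-4o.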
That said, ControlNet may struggle in more complex or open-ended cases, such as mask-to-image scenes involving multiple objects or interactions (e.g., aquariums with visitors and fish). In these scenarios, GPT-4o's strong cross-modal understanding partially compensates for its weaker control, offering plausible but not fully precise outputs. By comparison, Gemini 2.0 Flash lacks robust controllable generation capabilities across all evaluated control types. Its outputs often fail to match either the control condition or the textual prompt, reflecting limited capacity in multimodal alignment and structural grounding. + +In summary, GPT-4o demonstrates performance comparable to SOTA methods in most cases, excelling in tasks that require rich semantic understanding and contextual complexity while maintaining a balance between high-level semantics and visual plausibility. Although it may exhibit structural deviations in tasks requiring precise geometric alignment, its strong generative prior gives it an advantage in handling complex or open-ended scenarios. + +# Canny-to-Image + +# Evaluation: Controllability and text consistency. + +![](images/678977bfcc7731282c3cf0706d2a04a055df0dfc7950b35c45d46e6ffafa9252.jpg) + +![](images/34eb588af3ad0d16e975cf94c0165c9ea9cdbab78610d9c4fea8f37457c30f62.jpg) + +![](images/09b169752ee560ce282e0e189791bd22c980b5f6d260fd06d327d0bb93aa3fdc.jpg) + +![](images/7ba7f124d58c88bee0135fc235b0aebd8b6426909a755ac1f40c313c7c9cad81.jpg) + +![](images/e2e8d2f4bc567a25d1d7f56d98489dc9897b06c00ba90d8a58c28ba70a1e2151.jpg) + +Input Text: "Follow the prompt and canny condition below to generate a controllable image. The prompt is: a cigarette with purple tobacco." + +![](images/509dd28767c58df8dc35fefb0ee74154d0b9a3aadcb516526f252eee3fbd72ef.jpg) + +![](images/01c729abfdd7ba1065b518b5f8fac38df17a322d899f16a92e21606ffe75be31.jpg) + +![](images/572feead6e536b7524eb2f723ff12841c3e537418651f45988dbcb4312766cf2.jpg) + +![](images/9b39db21bfca83d6660c37f7265a41bd9d7830647b6fcbdaafa3cc14372c9c6e.jpg) + +Input Text: "Follow the prompt and canny condition below to generate a controllable image. The prompt is: a traffic sign with red cross written on it." + +![](images/582236643d6a53099264228610a5ef869dd1b77a4e837887915e57f35aec63bc.jpg) +Figure 36: Task: Canny-to-Image generation. The goal is to generate prompt-aligned images guided by canny maps. Setup: Each row shows an input canny map and a text prompt, with outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Dev w. ControlNet [51]. Observations: GPT-4o performs worse than FLUX.1-Dev [51] in structural fidelity, often introducing additional visual details that deviate from the input edge map. However, it produces more semantically aligned and aesthetically pleasing results overall. Compared to Gemini 2.0 Flash, GPT-4o significantly outperforms in both structure preservation and prompt consistency. + +![](images/2bc479794ab27b52ddd075f3f33d1e15a260aae2e169b634378ccdde014ad491.jpg) + +![](images/d328a02168204bffd240fc039c323fc7a9152c7db8d314980163eda7d00ecf5d.jpg) + +![](images/65ad8232e82ff1d205196c6cd60b284ad9f6d0f84803d62c20199cec22907447.jpg) + +Input Text: "Follow the prompt and canny condition below to generate a controllable image. The prompt is: oil painting of geese flying in a v formation over a pond at sunset." + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +FLUX.1-Dev w. ControlNet + +# Depth-to-Image + +# Evaluation: Controllability and text consistency. 
+ +![](images/74777175c863d9f366b45a7843d8eb9164d1d6ccee90d1c36c565abd5a6cd450.jpg) +Input Text: "Follow the prompt and depth condition below to generate a controllable image. The prompt is: a wooden bridge that has fallen down in the grass." + +![](images/4269989ea478ceccbce68d7fb7a7ad5b3bda0673dc3e29b9f2ccd7951d3cd19a.jpg) + +![](images/c4a439744d0603cca113101ddccdd39ff6f86154f75db50db717e2e21c43d890.jpg) + +![](images/c34d4877edd4f942f60dcc2c4527c7ca7b8db7854520d882872d6a05a6e0c924.jpg) + +![](images/18cac760bb10b2890aca59d644e60712db22b56fe52a9734a701f01dc680756b.jpg) +Input Text: "Follow the prompt and depth condition below to generate a controllable image. The prompt is: a 3d image of a stone building with plants and rocks." + +![](images/d96c269179392a477b20cac488bf504f948497d76d9712d151212c0dc9a0c84f.jpg) + +![](images/033fe2dbc7534a41ab6e0d52eae6d42fbd80b6b350bb3fbca8b041c43fc41b40.jpg) + +![](images/46b577cbc8feb431b513caaf0fa800f1004a3c0cb4738f315c72a97f38bb97c4.jpg) + +![](images/0d51b383aa6e1f47aa6ac0e33d09504e867f5f452a0b912bdf9c993b4e2622b3.jpg) +Input Text: "Follow the prompt and depth condition below to generate a controllable image. The prompt is: a red pillow on a chair." + +![](images/46017199672cdc547f9700dfe377960c656d99e3ef6cc597d38c0dbafddd087b.jpg) +Figure 37: Task: Depth-to-image generation, aiming to synthesize controllable and visually coherent images based on a text prompt and a given depth map. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and FLUX.1-Dev w. ControlNet [51], focusing on controllability, text-prompt alignment, and the visual quality of generated scenes. Observations: GPT-4o generates visually appealing and stylistically consistent images that align reasonably with text and depth cues—such as the bridge scene and stone ruins with rich lighting and artistic tone. However, its controllability is weaker than FLUX.1-Dev w. ControlNet [51], which shows more precise depth alignment and object placement, as seen in the accurate layout of the bridge and red pillow. GPT-4o leans toward stylized coherence, while FLUX emphasizes photorealism with sharper spatial fidelity. Gemini 2.0 Flash lags behind both, often showing depth misalignment, shape distortion, and weaker semantic grounding. + +![](images/90148daf477655ba5438aee9571917bb7d337f466c861465128cb7f0a6616246.jpg) + +![](images/c2e5c471cced0e2bc19cbb418990047f6502461f9ab068015212292ab802d155.jpg) + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +FLUX.1-Dev w. ControlNet + +# Sketch-to-Image + +![](images/ca0e9e3e4ad44d1a4998e2e53190039767bd351173e262b703bb663bf16e4dd2.jpg) + +# Evaluation: Controllability and text consistency. + +![](images/b553f7bab31ff9efef4880bc6749cc3b0972fb954926e7cddb979fe4fa86b1e4.jpg) + +![](images/e37560da746ee64c9f4dbda99f18792081354ee1290d5fb107e6b17833cab89b.jpg) + +![](images/774a2f8158ca42b89d06121387988ac2fa21eab2708a59d2a40c47aeeefeea74.jpg) + +![](images/9b25e051c800568d87dbf568b975a694ac1831be64dac4fe52e936e4cbe78e4f.jpg) + +Input Text: "Follow the prompt and sketch condition below to generate a controllable image. The prompt is: A small giraffe eating grass." + +![](images/76b3ffacb274a3c770ba9db5c2fcea3609020e8b054535fa7f050994e7337844.jpg) +Figure 38: Task: Sketch-to-image generation, which requires translating rough line drawings into realistic and semantically accurate images guided by text prompts. Setup: We evaluate GPT-4o against Gemini 2.0 Flash [99] and SDXL1.0 w. 
ControlNet [82], focusing on how well each model respects the provided sketch while reflecting the described content. Observations: GPT-4o excels at generating lifelike scenes that match the prompt, often delivering visually pleasing and contextually grounded outputs—like the natural posture and setting of the giraffe or the dynamic movement in the parachute example. However, it tends to soften or reinterpret sketch lines, leading to slight mismatches in fine structure. In contrast, SDXL1.0 w. ControlNet [82] offers stronger adherence to the input sketch, capturing geometric details more accurately (e.g., fan blades and figure outlines), albeit with slightly more synthetic textures. Gemini 2.0 Flash shows limited understanding of both sketch and prompt, often producing less realistic or structurally off-target images. + +![](images/5bbe5e94282bd4bc533b7d94b749decb59742f5aee7bcb68887dcce921c8b324.jpg) + +![](images/c3fac1a22608550f62bd5e74998a43c8ae3cd590d4dfa1647701dea7d93c7ce8.jpg) + +![](images/b2861bea0423cee774150f0887bdb41c652b436f4eada06228dfb7f7f53078b3.jpg) + +Input Text: "Follow the prompt and sketch condition below to generate a controllable image. The prompt is: A red metal electric fan." + +![](images/6e8cda0469a84878dd4813f102a0c8e09c7e5e7f7b3d81edb91ab19c428a22a1.jpg) + +![](images/b143d496a975b31c007201551ab2858b5f989539fd28932d11bf8433f8fa182f.jpg) + +![](images/ecbf422861a6f3acb1bf2fa0c11269caa5fa86090bdbdd18230b1dcaaeb183d8.jpg) + +![](images/e7467303dc6f9aead4b04680ab549cd4c9987da28a7b8e07f93928c3cc115828.jpg) + +Input Text: "Follow the prompt and sketch condition below to generate a controllable image. The prompt is: a man holding on to the strings of a flying parachute." + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +SDXL1.0 + +w. ControlNet + +# Pose-to-Image + +![](images/4f2a82196d91323ac6987c501fe192b87a37b8c7bd49cfefc255b54f649c96f1.jpg) + +# Evaluation: Controllability and text consistency. + +![](images/ef305485daf2aa15d9580d3579fba6ec68a2f97825b971e73b7dda80fc7b27d3.jpg) +Input Text: "Follow the prompt and pose condition below to generate a controllable image. The prompt is: Quarterback in a blue and white jersey with number 14, preparing to throw a football during a game." + +![](images/3015117dffd29de40979d63938f43194ed356ec24b893f0eca89a8e4d2397504.jpg) + +![](images/49309cffd79e60cb0270f525f65a35ec7a1dda136d91a8690b5a4235fa3ad428.jpg) + +![](images/4f0a2ccea09917b8073951d104a30402dee27f0a79f45727ae62f4aca6252dac.jpg) + +![](images/1529d55096562ea1287a6e80927c823cf3ef85f6ff2fc349534df8add404dfe4.jpg) +Input Text: "Follow the prompt and pose condition below to generate a controllable image. The prompt is: A young woman with long brown hair, wearing a blue strapless dress and a black necklace with a butterfly pendant, poses against a beige background." + +![](images/cbc8f5122209a93c7fcd5c01f787a5328fd12da73ba30b8fe0fa36d8089f19ee.jpg) + +![](images/b6efdeb3733117ab0264a27bea6c1da70b95aac300ccfe78cc7e7eaf29cc41bc.jpg) + +![](images/386ab41549ccbbdcb6953f2cfb0c22f29b59c77cb08a11e2d82aaf042ab0a3a4.jpg) + +![](images/12d972a127e37a6219e4bd892eadd138cfbb736fb6916e3a8ea8c8cccd698774.jpg) +Input Text: "Follow the prompt and pose condition below to generate a controllable image. The prompt is: A woman is performing a pull-up exercise on a gym rack." + +![](images/eff06e3cb3442fb9e89cec40098e50870a9ded39e1f69753076da696882aa86d.jpg) +Figure 39: Task: Pose-to-image generation, aiming to synthesize realistic images that reflect both the human pose and descriptive prompt.
Setup: We benchmark GPT-4o against Gemini 2.0 Flash [99] and SD3 Medium w. ControlNet [27], evaluating their ability to follow pose conditions while generating semantically accurate and coherent images. Observations: GPT-4o performs well in complex scenes—such as the football example—where it effectively integrates pose, clothing, and background with strong realism, contextual and pose accuracy. In simpler cases like the pull-up exercise, it shows occasional pose drift, especially in limbs. SD3 Medium w. ControlNet [27] offers better pose fidelity overall, though its visual quality can be inconsistent. Gemini 2.0 Flash underperforms in both structure and coherence, often generating anatomically incorrect or visually weak results. Overall, GPT-4o balances text understanding and generation quality, especially in detailed prompts. + +![](images/c9e2f0970eb2e881c87f2518165ec8fafd6765bc14d8e17f59829268ad6411c8.jpg) + +![](images/b7302a93838bd0514ce6b4544837c9f5854b7f09d9a31b036a582c5735883900.jpg) + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +SD3 Medium w. ControlNet + +# Mask-to-Image + +![](images/3395f7e5893f7029588b9d5b16f3549b7c5baa04869e047e731f2d72c81f6981.jpg) + +# Evaluation: Controllability and text consistency. + +![](images/3b637fcfa8060bbf723d957b7f4a1aa49a5f9d87c744993d0514d9873e23989e.jpg) + +![](images/8175a842bf2f363a8a70eb3f0550f36e400f3fdd4bd3f80909fb7879a8e130c1.jpg) + +![](images/6e8d8fa6a7fc992e352dc0100c93543a6693728f8a6fe64aac51f54c477a6736.jpg) + +![](images/494f4bc560feb0a0c69c549d9e94d552cb214aa34f2e532c73c99c615ff3a691.jpg) + +Input Text: "Follow the prompt and mask condition below to generate a controllable image. The prompt is: A peaceful indoor church scene with a plain wall, stained glass windows, a wooden podium, and a stone altar under soft sunlight." + +![](images/8d843e8b140fd3c6f219b02b54f981e68e8501565fcef5774b45f8e54c5de112.jpg) + +![](images/e9707ee40f1e21949b575b798c93018191eaeb0953a7978ef48d77face6376a1.jpg) + +![](images/41fa18b12458faa6a8dc00ecb9ddebef31263254df2985d4bd17d39f0aee636c.jpg) + +![](images/fd41fad2fe31ea862cb86cef72fd1fa75256598dab78c33edf42129c6eb8f4a1.jpg) + +Input Text: "Follow the prompt and mask condition below to generate a controllable image. The prompt is: An indoor aquarium scene with a large fish tank full of colorful tropical fish swimming. The fish tank is surrounded by walls and has a visible floor at the bottom. The environment is bright and underwater-themed." + +![](images/32972f2863a4ed568d406e940ec73fb2acb89d05a9ce88264831494aca2909d5.jpg) +Figure 40: Task: Mask-to-image generation, which requires translating semantic segmentation maps and textual prompts into coherent and realistic images. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and SD1.5 w. ControlNet [90], focusing on their ability to combine spatial layout from the mask with deeper scene understanding from the prompt. Observations: Compared to previous control tasks, this setting demands more from the model in terms of semantic reasoning and compositional understanding. GPT-4o excels in this regard, producing visually consistent scenes that align with the prompt's intent—such as the serene church interior and the immersive aquarium setting with visitors. However, in fine-grained spatial control, especially with small or tightly shaped objects like tropical fish, SD1.5 w. ControlNet [90] performs better in preserving shape and positioning.
Gemini 2.0 Flash continues to struggle in both fidelity and adherence to masks, often missing key scene elements or producing oversimplified outputs. + +![](images/97eac775ab7fe9404225b9ec41d4b00866061d69df88ce9adc4b8d365ba02992.jpg) + +![](images/de15cb94a339b40e3ca5dcb35e27521f86d056376dba353e39d9dfc849b5350a.jpg) + +![](images/c99eb6ee2f33a13babe23a2160bddcbb33b49dbaaa63c5c80ad3c9928ada400c.jpg) + +Input Text: "Follow the prompt and mask condition below to generate a controllable image. The prompt is: An indoor aquarium with a large fish tank and colorful tropical fish, with a few visitors in the scene." + +Input Image + +GPT40 + +Gemini 2.0 Flash + +SD w. ControlNet + +# 2.2.7 Camera Control + +Although recent visual generative models demonstrate remarkable capabilities in creating high-quality images, generating images with specific camera settings (e.g., bokeh blur parameters, focal length, shutter speed, color temperature) and making further adjustments remains a challenging task. We further explore GPT-4o's performance in camera control, evaluating its ability to generate images with desired photographic parameters in text instructions. This task is particularly significant as it bridges the gap between artistic creativity and technical precision, enabling users to simulate professional photography techniques and achieve greater control over the visual output. Such advancements have broad applications in fields like photography, cinematography, and visual design. + +Specifically, we collect text prompts from [118], and compare GPT-4o and Gemini 2.0 Flash [99] with Generative Photography (GP) [118]. The results are reported in Figures 41, 42. We can observe that GPT-4o achieves decent results in controlling bokeh blur parameters and color temperature, demonstrating its strong generalizability to various photographic settings. However, it still falls short in adjusting focal length and shutter speed, occasionally leading to inconsistent visual semantics or incorrect visual effects. By comparison, Gemini 2.0 Flash struggles significantly across all camera control scenarios, failing to produce coherent or accurate outputs that align with the specified photographic parameters, highlighting its limited capability in this domain. + +In this task, GPT-4o shows promising potential in camera control, outperforming Gemini 2.0 Flash and achieving competitive results in certain aspects. Nonetheless, there remains room for improvement in handling more complex adjustments, which could further enhance its applicability in professional photography and creative industries. + +# Camera Control + +![](images/541421ed5de9f82a9202335ca7678323b87627971bf9397ec165d6899894a416.jpg) + +# Evaluation: Camera setting adjustment, semantic consistency. 
+ +GPT 40 + +Gemini + +2.0 Flash + +GP + +![](images/57830b99a43f06f49ba04d1b2f5ded94ea4df8175531c29f23aca0f89a53b83b.jpg) + +![](images/91df94d12a7739436d063cd8168ea4e4d26c88f28f02f9407f230adabe8d29e6.jpg) + +![](images/0ec6090994cbd2295c92ceaabd8108bda9fc1d44cff795793c63c2f03a6f1419.jpg) +28.0 + +![](images/f69e3b0379013765f2adbc1aca797b0298039407cd2b3e804055f8aa7b5d7129.jpg) + +![](images/e06a1d22dd6093751188bb61a4da58c22606454753b4f5ef255503039c23b4d9.jpg) + +![](images/c037197eb3df793fe54ce69be58db82135ee54f307b277b1651e4aea252edd89.jpg) +14.0 + +![](images/7617257b54d51121e2b378531c54403d5f093b828cb72764f28cbaf8e2d75466.jpg) + +![](images/cbefa33dacb0fcc87c8295a83dda71231a6f564b3f6a39b28e9b44a76d014c28.jpg) + +![](images/0abff9671077ea8920b3cfaa655376afa170146e8731c668c911bd36367fe676.jpg) +10.0 + +![](images/3f74eb072bab2b800151c5b569b4d234ff14848a96ea3fa56d91a676cf3c70bf.jpg) + +![](images/3fe16c86a896d908033e9f78ddd28e8ff19dae54bfcbf58eef865c9f9147a115.jpg) + +![](images/e138171044c5ed197f98e471ee68b0a7988c369038ddde3296f693594e3ed2c6.jpg) +6.0 + +![](images/4ae472a9c88dd869cf1cee61a0638cf7a434e0355bfd8577ac37c979ef8d008a.jpg) + +![](images/02202d497506f20a18941658d169deee92ee7a8b17e62ab0a67e4f38d7fc3f81.jpg) + +![](images/359582282a17e20740a8e878147c496925396e8e780d4f8429b70706e235f66f.jpg) +2.0 + +GPT 40 + +Gemini + +2.0 Flash + +GP + +![](images/6ac4b0f6100f66c6e3a3675e85edf101192eaa5e7afac67acbc73dad433f98df.jpg) +Input Text: "A horse with a white face stands in a grassy field, looking at the camera; with bokeh blur parameter *" & "Adjust the bokeh blur parameter to *" (* indicates a specific value). + +![](images/f13c64cd986d4479510b7a4819c3393ff74c88928cb3d8ab9642dde087405fb8.jpg) + +![](images/8ba638159d6af8b00306bfa770db1930683a70fb3c84b43764507be8b7b6f124.jpg) +24.9 + +![](images/032dd6d95007fdd7dba51b274352d33e5c899e1f0a93a08947da88f005ed8904.jpg) + +![](images/32ffc1eb96999685c0b89d169c9aa5316abd2e06ff28b61ba0beef011cd7d160.jpg) + +![](images/6f7393447d4c0dd9cc43b403fb767ccae61d13073cbf4e7b6de1e3bbffbc5cfd.jpg) +36.9 + +![](images/e9ec594631498e2a1f8e661bedae31c443f9b190e1be8f521a61188f4d8f033b.jpg) + +![](images/d489e64ba5236d20a8ccd8750097eec10c9d7fb5eb57f9ae796871bfa2203c22.jpg) + +![](images/7c825dcf01a94678b59a1ac7f191122428497b7adf966110443aeb1232b68996.jpg) +48.9 +Input Text: "A beautiful garden filled with red roses and green leaves; with * mm lens" & "Adjust the lens to * mm". + +![](images/7b779ff8fb58b3aae04af13166a3b1967efcf87892bd378886be402ea8763601.jpg) + +![](images/889fe3842b7457a957d4f92c06ccec58df898a9c56de1a94074c190c063efac7.jpg) + +![](images/b5bfd8b85c6c3bc556cebefab62a44ff37c4d3e8e69240fa38a6ac13c7cc168b.jpg) +60.9 +Figure 41: Task: Camera control. The goal is to generate images aligned with specific photographic parameters, such as bokeh blur, focal length, shutter speed, and color temperature. Setup: Results are based on text prompts collected from [118], comparing outputs from GPT-4o, Gemini 2.0 Flash [99], and Generative Photography (GP) [118]. Each row includes the input text instructions and corresponding outputs. Observations: GPT-4o demonstrates strong performance in controlling bokeh blur, producing visually appealing and parameter-aligned results. However, it shows limitations in handling focal length, occasionally generating inconsistent or less accurate outputs. By contrast, Gemini 2.0 Flash struggles significantly in both aspects, often failing to produce coherent results. 
Overall, GPT-4o achieves better performance in this task but still requires further refinement to enhance focal length control. + +![](images/099e4ec78e062250f7bdab074cc4518ca8ed428e187a0bb348ddc4b8e86c87a1.jpg) + +![](images/74da7612dd6d92ecd657e1f44bdbfc6138a44b8f5bad9b26166f4cd12e4bcb65.jpg) + +![](images/e3c6465c6ac99b5cac8b7b87979c048e1ec6ea06f3001201cdafef489c4470e1.jpg) +69.9 + +# Camera Control + +# Evaluation: Camera setting adjustment, semantic consistency. + +GPT 40 + +![](images/b0d136fb714a9be563b2f3204d0e0bad1bfffa49452c0d77a5bdfb3a14e364d0.jpg) + +![](images/6353301ea1a4bd02c64e31effbe95a706a80640c23bedd6dff2583457c4933e0.jpg) + +![](images/f748ae519d07deeba518763dcddb8fac9d6dd17225e417618ff62357c72bb0c3.jpg) + +![](images/a145180a5acb6bef4bb07f8a9b725b80a966f60fdc5991366b23f4cc7d100d3d.jpg) + +![](images/d215069ec65dc0f111efbae6b16c00981d2ebba9711197049d3d10d379a5fe6f.jpg) + +Gemini 2.0 Flash + +![](images/34364cd752d523f7253e2bed26e26e7bb958064d8b44d8df3b0c491d05214cb2.jpg) + +![](images/8107bc9ed92dde4a8075837a95dd9e6711b30dd5e1a18eb7914511ac730f010f.jpg) + +![](images/e8ab46f36bede74d3eb3841233ac4c2a6eb5643b1d97d4921f99dd2c221aef40.jpg) + +![](images/a26d98ea46dba3acd2775aa3134da02bf0e703e9b8a805247053fdfe1b12d8bc.jpg) + +![](images/caed113ea41a0bbb54c520a911d0e22a31ace6434333320e221dd66e5f86ef33.jpg) + +GP + +![](images/9d2ccc2dc8ada5802d342f8533d6e9a320919119569352e82afb4bf3500698e0.jpg) +0.88 + +![](images/dd17353d44f66bbb4b35301466dd1088e56a6dbb759f8d6e1a798e291070cdb4.jpg) +0.68 + +![](images/c65293d5487451040c046c09049d817ac48c8d7e2a82e633e0d317f853e37512.jpg) +0.48 + +![](images/1a0a8291fc0eb54d480890af32530069802f1d52bc82f8da414ec15032e47f7b.jpg) +0.38 + +![](images/0b0cfcb2c756d34ecf6d719996e6b93b05c237d3b9298e8a45b297f87db15d04.jpg) +0.28 + +GPT 40 + +![](images/2f8ba2525f180355e9429c8aa78eab3fdc5e1c1c77db35dd88729c6608f735e8.jpg) +Input Text: "A blue pot with a plant in it is placed on a window sill, surrounded by other potted plants; with shutter speed * second" & "Adjust the shutter speed to * second". + +![](images/e2bafc2e24743ad788bd64f544fbd0056daacff8e5ccf84bf8a7e7d5bd57b702.jpg) + +![](images/65a3cacd9307277d2c15e32c391395ee4d10278b0cde3e7007004223bb26c6c9.jpg) + +![](images/4851ec2102693629a48d4c59a074117a859cc53c4e7a1a739b2b4623d6768712.jpg) + +![](images/c55187e3cf1b46125c32de8fa1ced6d268639035921d9b205dcf8ee53eaef80f.jpg) + +Gemini 2.0 Flash + +![](images/fe6974e0162720276dc4d0b553ff8e8fdee18edf7ebaa81e761e6d01fa924c91.jpg) + +![](images/c30bd99d0cf615babc90bf1be839dd3875329913446e1f8f05070addcf8105c3.jpg) + +![](images/de07423ffdd0dc744cf6f4c7b38144d08ecc3112098ef6d6efbfb06253fb9def.jpg) + +![](images/9df5012deab1c75c8cc3c9b199f8d51adaf06952378b5293634b0a061a791b0f.jpg) + +![](images/76238724fe5435d83934f4d9ceaa07fd016a8c9970f6377cd6eb7afa1409a5d4.jpg) + +GP + +![](images/077315bfabdff852ee00ffcd33f6dc6141773a534acf687e5c1e7ad5284a7a19.jpg) +3100.0 + +![](images/334e57c72d615f0c16cf7f289ed3e1236adf82ed7e69d06552fc24a1910711db.jpg) +4000.0 + +![](images/838fcf545b25f35bfeec3bef84c6f0c0db6ecfcb9f88eaec03caefa49c2b0b1c.jpg) +8000.0 +Input Text: "A collection of trash cans and a potted plant are seen in the image. The trash cans are individually in blue, black and yellow; with temperature * kelvin" & "Adjust the temperature to * kelvin". + +![](images/3569ee4edc461e7da1c8372a4a522d92cca61c99b430368708a6b7dc82703262.jpg) +7000.0 +Figure 42: Task: Camera control. 
The goal is to generate images aligned with specific photographic parameters, such as bokeh blur, focal length, shutter speed, and color temperature. Setup: Results are based on text prompts collected from [118], comparing outputs from GPT-4o, Gemini 2.0 Flash [99], and Generative Photography (GP) [118]. Each row includes the input text instructions and corresponding outputs. Observations: GPT-4o demonstrates strong performance in controlling color temperature, producing coherent and visually accurate results. However, it struggles with shutter speed, occasionally resulting in inconsistent or unrealistic motion effects. In contrast, Gemini 2.0 Flash fails to consistently handle either parameter, often producing outputs that lack alignment with the desired settings. Overall, GPT-4o outperforms Gemini 2.0 Flash in this task, but further improvements are needed for precise shutter speed control. + +![](images/a99706fb05eb2a9239f7691575789b91486fcd20c7175b2b5dd9b1fbd088f850.jpg) +3000.0 + +# 2.2.8 In-context Visual Prompting + +The in-context visual prompting tasks aim at understanding and executing specific tasks on new query images by leveraging a pair of task-specific example images and accompanying text instructions. Previous works [105, 18, 52] have explored this capability in the context of diffusion and autoregressive models, demonstrating its potential in enhancing model adaptability. The significance of in-context visual prompting lies in its ability to enable models to generalize to novel tasks. This approach mirrors human-like learning, where new tasks can be understood and performed by observing relevant examples. This capability has broad implications across various domains, and paves the way for more flexible and efficient paradigms capable of adapting to a wide range of specific tasks. + +We curate four representative tasks to evaluate the performance of GPT-4o in in-context visual prompting. These tasks are designed to assess the model's ability to understand and adapt to specific visual tasks based on provided examples and guidance, including: + +- Movie-Shot Generation: A three-shot image collected from [42] is provided as an example, and the model is instructed to follow this format to generate similar movie shots for the query image. +- Ray-Tracing Rendering: An example gaming scene is provided with and without ray tracing, and the model is expected to render a ray-traced version of the query image. +- Overlaid Mask Visualization: The model receives an original image accompanied by its corresponding segmented results from [49] and is tasked with outputting the segmented results in the same format for the query image. +- Maze Solving: A maze and its corresponding solution path are provided as examples, and the model is required to draw the solution path for a new maze presented in the query image. + +All the results are illustrated in Figure 43. Compared with Gemini 2.0 Flash [99], GPT-4o demonstrates promising performance in movie-shot generation and ray-tracing rendering tasks, showcasing its ability to follow example formats and generate visually coherent outputs. However, it still struggles with maintaining consistent visual semantics across the generated outputs. For the overlaid mask visualization task, GPT-4o falls short in effectively executing the instructions. The result fails to adhere to the required format, indicating that the model's ability to process and generate complex outputs remains limited. 
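For reference, each in-context query in this protocol can be thought of as a single multimodal request that packs the example pair, the query image, and the instruction together. Below is a minimal sketch of assembling such a request, assuming the OpenAI Python client; the model name, file names, and request details are illustrative placeholders rather than the exact harness used here, and a production harness would request an image output rather than the text reply printed for simplicity.

```python
# Sketch of assembling an in-context visual prompt: one example image pair,
# one query image, and a text instruction in a single multimodal request.
# Assumes the OpenAI Python client; model name and file names are placeholders.
import base64
from openai import OpenAI

def image_part(path: str) -> dict:
    """Encode a local image as a base64 data-URL content part."""
    with open(path, "rb") as f:
        data = base64.b64encode(f.read()).decode()
    return {"type": "image_url",
            "image_url": {"url": f"data:image/png;base64,{data}"}}

client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4o",  # placeholder model name
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": (
                "The first image displays an unsolved maze and the maze with "
                "a solution path in red. Please imitate this image and "
                "identify the solution path for the second image."
            )},
            image_part("example_maze_pair.png"),  # in-context example
            image_part("query_maze.png"),         # new query image
        ],
    }],
)
print(response.choices[0].message.content)
```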
For maze solving, a task that demands advanced visual reasoning and logical inference, GPT-4o struggles significantly. This highlights the challenges in combining higher-level reasoning with visual generation capabilities, suggesting that more sophisticated reasoning mechanisms are needed for tasks of this nature. + +In summary, GPT-4o shows considerable potential in in-context visual prompting, while it still underperforms in certain difficult tasks. These observations suggest that further advancements are necessary to enhance its generation and reasoning capabilities for more complex and diverse visual tasks. + +# In-Context Visual Prompting + +![](images/751b8b0b723ad8d89ba3583fc6932d850eb7cf8a76f7215ed994d255821fb63e.jpg) + +# Evaluation: Understanding and executing specific tasks with example images. + +![](images/7e5a6dca5061982b8b4f23f693eb84c50a375b77e068c461c42e68104c157985.jpg) + +![](images/70e1fc27eedb02031d28c9885667468714afce1b37646dd33a83ef114dce4e78.jpg) + +![](images/5f022a9d8700e671b60295a4ae5dd57ff7c7747fa42c0d5b9d6fb09f4fe1b014.jpg) + +![](images/49f93c593df590166d47657e6c0eda2ed7ef62cdd094e685de672efaec343681.jpg) + +Input Text: "The first image contains three movie shots. Please imitate this image and create the subsequent movie shots for the second image." + +Input Image + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +![](images/e63aaf3e7583e329733bdf0d0086b196c266732e65fcc124b54b13a2ae6da734.jpg) + +![](images/05f1f17e9903bfc312375e98f666c3fae0cca18b6d2b7c60fe1605e04fefa3d0.jpg) +Input Image +Input Image +Gemini 2.0 Flash + +Input Text: "The first image includes an original gaming scene, and the scene enhanced with ray tracing. Please imitate this image and create the scene enhanced with ray tracing for the second image." + +![](images/27c5eea52e6ca0c737ae2cf46a953e11f8a44064e52b132ad803df249ab60e5c.jpg) +Input Image + +![](images/0755edc88c21d2311018ccff73e7808905f504f9fd16b4b534f5cee1942b18b2.jpg) + +![](images/2c63f34552d2fef8c8e86f7dd76a62315f4c1dd4bcb26b2198e0e7acd23d6ef7.jpg) + +![](images/1fb2c646d1d34c3087486d5485c780d38c7e518b41b9210914f00d583ccfd2b1.jpg) + +Input Text: "The first image shows an original image and its segmented results. Please imitate this image and output the segmented results in the same format for the second image." + +![](images/4702bc6682345e1ecd0d870da7f607f3851d574f86f2af27b69b7ce8feb8d7b3.jpg) + +![](images/4cefd3a472618bf4ed1b900b8f0253d8ca072ad60e16e238d601280a1ff4ac20.jpg) + +![](images/a05ae86c57f255f29915d6f1ca7fbca481e453d5260a2ee80c1df5f1a84c31e1.jpg) +Input Image + +![](images/fe38be0bc78d431b48a8e0538e8cea074d5ce5e421511c2e081ea26e6ad8173c.jpg) +GPT 40 + +![](images/acc8a88501794d5cb5abb73802e25f714233bc33723c8874cf9c6c27f2a19f7d.jpg) + +![](images/2a04299128063c19b3d18cf2c84c9a40f476b3c8ef92b747667e18981dce0475.jpg) +Gemini 2.0 Flash + +Input Text: "The first image displays an unsolved maze and the maze with a solution path in red. Please imitate this image and identify the solution path for the second image." + +Input Image + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +Figure 43: Task: In-context visual prompting. The goal is to perform specific visual tasks on new query images based on task-specific example images and text instructions. Setup: Four representative tasks are evaluated: movie-shot generation, ray-tracing rendering, overlaid mask visualization, and maze solving. Each row includes example images, query images, and the corresponding outputs. 
Observations: GPT-4o excels in movie-shot generation and ray-tracing, producing coherent outputs, but lacks consistency in visual semantics. It fails with overlaid mask visualization and maze solving, showing limits in complex task integration. While promising for in-context visual prompting, it needs refinement for more complex and reasoning-intensive tasks. + +# 2.3 Image-to-3D Tasks + +We evaluate GPT-4o's ability to understand 3D structure from 2D images across three tasks: 2D image-to-3D modeling, 2D UV map-to-3D rendering, and novel view synthesis. + +# 2.3.1 Image to 3D modeling + +Generating 3D models from monocular images supports a wide range of applications, including augmented reality, virtual reality, and the gaming industry. This capability not only facilitates the content creation process but also reduces the reliance on specialized 3D artists for creating 3D assets, making the process more time- and cost-effective. Therefore, there is growing research interest in generating 3D models from 2D images. Early image-to-3D methods employ learning-based approaches for single-view reconstruction [74, 77, 102, 79]. Recent works leverage diffusion model priors to perform image-conditioned 3D generative modeling [69, 68, 83, 113]. + +In this section, we investigate the potential of GPT-4o for 3D modeling from 2D images. We begin by prompting GPT-4o to generate a Cinema 4D modeling interface to test its ability to produce coherent representations of structure, material, and wireframe based on the input image. As shown in Figure 44, GPT-4o can generate high-quality 3D model renderings within the application interface. Notably, the generated models exhibit clear wireframes and textures consistent with the input images. In contrast, Gemini 2.0 Flash and Midjourney v6.1 fail to achieve comparable results under the same conditions, producing inconsistent renderings. We then prompt GPT-4o to generate corresponding 3D object and material files in .obj and .mtl formats to further evaluate its understanding of the underlying structure in the rendered images. However, the output 3D models are coarse and inconsistent with the input images, indicating that although GPT-4o can produce visually coherent 3D renderings, its capability to transform these into accurate and usable 3D object files remains limited. Additionally, Gemini 2.0 Flash and Midjourney v6.1 do not support exporting 3D models. + +# 2.3.2 UV Map to 3D rendering + +UV maps are 2D images that store texture information for 3D models. In 3D modeling, geometric data is represented in 3D space, while texture data is defined in a 2D texture space. UV mapping is the process of projecting a 2D UV map onto a 3D model, accurately aligning texture with geometry (a minimal sketch of this operation follows below). The UV mapping process can be used to evaluate a model's capability for 3D perception and spatial understanding. Moreover, this task has broad applications in design, helping to reduce the burden on designers of manually creating product renderings from 2D maps and providing useful references. + +As shown in Figure 45, GPT-4o exhibits a superior ability to generate consistent 3D renderings from 2D maps compared to Gemini 2.0 Flash and Midjourney v6.1. However, some outputs remain unsatisfactory, displaying inconsistencies in patterns and structure (see row 3 in Figure 45). Gemini 2.0 Flash struggles to correctly wrap the 3D model, though it maintains pattern consistency. Midjourney v6.1 tends to introduce additional, imagined features, which reduce controllability in this task.
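To make the UV-mapping operation concrete, the following self-contained sketch shows the core texture lookup: each vertex carries a 2D UV coordinate that indexes into the texture image. This is a toy numpy illustration; real renderers interpolate the lookup per fragment rather than per vertex, and the arrays here are made-up examples.

```python
# A minimal sketch of the core UV-mapping operation: looking up texture
# colors for mesh vertices via their 2D UV coordinates. Toy illustration;
# real pipelines interpolate per-fragment with filtering.
import numpy as np

def sample_uv(texture: np.ndarray, uv: np.ndarray) -> np.ndarray:
    """texture: (H, W, 3) image; uv: (N, 2) coords in [0, 1] with v pointing up.
    Returns (N, 3) per-vertex colors."""
    h, w = texture.shape[:2]
    # Convert UV space (origin bottom-left) to pixel space (origin top-left).
    x = np.clip((uv[:, 0] * (w - 1)).round().astype(int), 0, w - 1)
    y = np.clip(((1.0 - uv[:, 1]) * (h - 1)).round().astype(int), 0, h - 1)
    return texture[y, x]

# Toy example: a 2x2 texture and three vertices mapped to its corners.
tex = np.array([[[255, 0, 0], [0, 255, 0]],
                [[0, 0, 255], [255, 255, 0]]], dtype=np.uint8)
uv = np.array([[0.0, 1.0], [1.0, 1.0], [0.0, 0.0]])  # TL, TR, BL corners
print(sample_uv(tex, uv))  # -> red, green, blue
```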
+ +# 2.3.3 Novel View Synthesis + +From a single view, humans can imagine an object's 3D shape and appearance, since they have accumulated prior knowledge about objects throughout daily life. This ability to infer novel views of objects is essential for a wide range of tasks, from object manipulation to artistic creation such as painting. Early works achieve image-to-3D reconstruction using category-specific priors or large-scale pre-training [45, 80, 87, 32, 131]. Recent studies have shown that large diffusion models contain rich 3D prior information about the visual world, enabling them to perform novel view synthesis [69, 68, 83, 70]. These novel views can then be used for zero-shot 3D reconstruction with different 3D representations such as NeRF [76], mesh, or SDF. + +In this section, we evaluate GPT-4o's ability to perform novel view synthesis on objects with artistic styles and asymmetric geometry. As shown in Figure 46, for artistically styled objects, GPT-4o and Gemini 2.0 Flash largely preserve structural consistency with the input image, although they may change some elements or fine details. For the asymmetric object, GPT-4o can preserve the object scale and size better than Gemini 2.0 Flash. However, Midjourney v6.1 fails to generate consistent novel views, instead producing visually appealing images that do not align with the given prompt of this task. + +# Image to 3D Model + +# Evaluation: Shape/texture consistency, wireframe plausibility. + +![](images/1f9b0b011e88c337096f493dbdc16bbafefb87c7ac6d0817b51a02d65ae211a2.jpg) + +![](images/20925173299f15c6d10bdcdcb9ec379e65b18d75af2a57d652be5a8ff0b8001d.jpg) + +![](images/afb58fab9b72f3bf1b78d415b4d5e8ca817660113f2b9a1da7dee7153ca97330.jpg) + +![](images/1b8601f013b93c27baf25e41c09132b903155d83d0ba6b9bdcd83e2b33d24788.jpg) + +![](images/7add957f9d7531f536cfef81731bc6b28f75e12995ba9dbfe3e545658a323c7d.jpg) + +![](images/8d55ee618dd53e3a5e154598b93d67afc79a4d7bf139ce03e31d740af22b29f0.jpg) + +![](images/9149d06a0cd845a92028c8f8199da8213692d3c85666f692dd350f78144af753.jpg) + +![](images/666712e651824dc36d467ea97f632d637319b03414823161f4a04e13a521f767.jpg) + +![](images/c46b403e7ac2e945512ffa6d309f1e6f20a355598593e3758059233cf042e2e6.jpg) + +![](images/b15793d17ca7c80bf1b909d279a47bdf5b3a139165326ca3c1dffd011f7b0722.jpg) + +![](images/875d4fca774409cc984e2951d3b6c99aa12608a7a78632a41fcd84f6bd223082.jpg) + +![](images/1a0ac4c5dee91e1501338bfcaaf66863752c28b3f6438eb24488b2e78087a942.jpg) + +![](images/f5a97061e0c642e8ad66652a68547da7ce296666b9869a78d2e0233544c9a488.jpg) +Figure 44: Task: Image-to-3D model rendering. Evaluate the 3D modeling ability given a 2D image. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better 3D model renderings, with consistent shape, texture, and plausible wireframes, than Gemini 2.0 Flash and Midjourney v6.1. + +![](images/a11e7e5a889c46ee27cc17d0f091d77cb1c167e6c4314649ed219089b6bfe94a.jpg) + +![](images/4b00229ed17a9f68b0777d94b5bc1f08385f38b87d3759265475e7b14d5c6df0.jpg) + +![](images/67c90170597a34e84c1311ea2c3f363b473991486ba6bf0a15957d95db598921.jpg) + +Input Text: "Generate a pre-render view of a C4D model, including the UI, wireframe and material." + +Input Image + +GPT-4o + +Gemini 2.0 Flash + +Midjourney v6.1 + +# 2D UV map to 3D rendering + +![](images/5df66609cbc1920bfd7bfab7731dee320fbf9bd64ef127bfe114205f6384aaa1.jpg) + +# Evaluation: Structure/pattern consistency.
+

(Figure 45 panels. 2D UV map to 3D rendering. Evaluation: structure/pattern consistency. Prompts include: "Assemble this packaging cutout into a complete product and output a 3D rendered image." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, Midjourney v6.1.)

Figure 45: Task: 2D UV map to 3D rendering. Evaluate the 3D perception and spatial understanding ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better 3D renderings based on 2D maps than Gemini 2.0 Flash and Midjourney v6.1. However, structure and pattern inconsistencies still exist among these three models.

(Figure 46 panels. Novel View Synthesis. Evaluation: consistency. Prompts include: "Generate three views of this picture." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, Midjourney v6.1.)

Figure 46: Task: Novel view synthesis. Evaluate the 3D perception and spatial understanding ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better style- and structure-consistent novel views for both artistic paintings and asymmetric objects.
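+

The consistency judgments in Figures 44-46 are qualitative. As a rough quantitative proxy, one could compare the input image against a generated view with a structural similarity score. The sketch below is a minimal illustration, not the protocol used here; the file names are hypothetical, and grayscale SSIM only loosely approximates the shape/texture consistency being judged, since a faithful novel view legitimately changes the viewpoint.

```python
# Minimal sketch: structural-consistency proxy between an input image and a
# generated view, using grayscale SSIM. File names are hypothetical.
import numpy as np
from PIL import Image
from skimage.metrics import structural_similarity

def load_gray(path, size=(512, 512)):
    """Load an image, resize to a common grid, and convert to grayscale."""
    return np.asarray(Image.open(path).convert("L").resize(size))

inp = load_gray("input.png")        # hypothetical input image
gen = load_gray("generated.png")    # hypothetical generated novel view

# SSIM lies in [-1, 1]; higher means more structural agreement. A perceptual
# metric (e.g., CLIP similarity) may be more appropriate for strong style or
# viewpoint changes.
score = structural_similarity(inp, gen, data_range=255)
print(f"SSIM: {score:.3f}")
```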
+

# 2.4 Image-to-X Tasks

In this section, we further evaluate both GPT-4o and Gemini 2.0 Flash on several dense image understanding tasks, including segmentation-related tasks, depth estimation, normal estimation, matting, salient object detection, edge detection, layout detection, text detection, and object tracking.

# 2.4.1 Image Segmentation

Image segmentation tasks group the pixels of a given image or video into semantic regions. It is a fundamental problem in computer vision with numerous real-world applications, such as robotics, automated surveillance, and image/video editing. With the development of deep learning, this domain has achieved rapid progress. Early works mainly adopt CNN-based methods with large kernels or large receptive fields; more recently, transformer-based methods have surpassed CNN-based methods on various benchmarks. In particular, we test three segmentation tasks: referring segmentation, semantic segmentation, and panoptic segmentation.

Referring Segmentation. This task outputs the mask corresponding to an input text, and the goal is to test the pixel-level grounding ability of the model. In Figure 47, we compare GPT-4o, Gemini 2.0 Flash, and a recent state-of-the-art method, Sa2VA [117] (8B model). We show five open-world test cases. For the first two cases, GPT-4o shows coarse localization ability on background regions; for example, it can mark the grass region despite inaccurate boundaries. However, compared with the SOTA method, Sa2VA, GPT-4o mistakenly merges the two large regions. In the third row, neither GPT-4o nor Gemini 2.0 Flash can perform grounding with complex text inputs. In the fourth row, all models perform badly: GPT-4o generates a chair that does not appear in the image, Gemini 2.0 Flash instead performs image editing by replacing the smallest chair with a normal chair, and Sa2VA also segments the wrong object (the nearest chair). In the last example, GPT-4o likewise fails to segment smaller objects ("bag"). In all examples, both GPT-4o and Gemini 2.0 Flash modify the image contents. These examples indicate that GPT-4o has weak pixel-level grounding ability.

Semantic Segmentation. Semantic segmentation assigns each pixel a semantic label and is a fundamental vision task. In Figure 48, we show several test cases on the semantic segmentation task. In particular, we adopt Deeplab-V3+ [14] (ResNet101 backbone, trained on Pascal-Context) as an expert model for reference. Surprisingly, the mask quality of GPT-4o is good on four examples, even comparable with the expert model, Deeplab-V3+. During testing, we find that text labels may be randomly appended to the masks, which is why the first row differs from the remaining examples.
For the second and third examples, GPT-4o misaligns the text and mask regions. Compared to Gemini 2.0 Flash, GPT-4o has a much stronger ability in semantic segmentation, particularly for mask shape. However, there is still a lot of room for this task, including a unified semantic segmentation format, enhanced text and mask alignments, and more correct mask labels. + +Panoptic Segmentation. This task assigns the foreground region a semantic label and assigns one mask label and one instance ID to each instance, which is a unified task format of semantic segmentation and instance segmentation. In Figure 49, we compare the panoptic segmentation ability of GPT-4o, Gemini 2.0 Flash, and one expert model, K-Net [123](trained on the COCO panoptic segmentation dataset, with ResNet50 as backbone). Overall, the mask shapes of GPT-4o are good. The model can understand the panoptic segmentation task, while the Gemini 2.0 Flash cannot do this task in the first and third cases. However, the spatial locations have been changed for all cases. The generated masks are in part-whole formats and are even finer-grained than K-Net. For example, in the first example, the jersey number (17) of the person and the hair of the people are also marked. Meanwhile, we also find a similar issue: several examples have text, while several do not have text, even though they adopt the same text prompt. In addition, GPT-4o can distinguish different instances with different colors, despite most of them not being good (see the last example). + +Image-to-X + +Evaluation: Referring Expression Segmentation, Grounding and Grouping. + +![](images/dbd74c382963ca8096f3df2cbf8c15b1dc9bb0dcf7b14f2691ca5f3969de4ae6.jpg) + +![](images/0602a903854b76208c57b28e4349dae82df8b2aa2e65f275df6b6940b8ec8061.jpg) + +![](images/03c4e2f2110cca390cec16e188035acf44295367b91027a771e4e53dedaf79e1.jpg) + +![](images/452eb4a0a98c3bffd9b5ca2149205d5cd34827599c8caa097cbd6d949a07b14c.jpg) + +Input Text: "Please segment the grass in the image and directly generate the output image." + +![](images/8f284b6c611448bfc951157e9a8d4ed20c6128da83b6852e318506b931aab1de.jpg) + +![](images/f99ed011b160f34447b7db945e73c93bb19c1fb9a7888322a3f61e89fa0b9b59.jpg) + +![](images/c5ff8325e12ed6030f6fd0a88c40d2600c49d7423ec72c29d20775bcecc460a1.jpg) + +![](images/e331257e3a002ea35aae9b3560e329ae51aea6ec05a4a8f8fc4c533cfe981c94.jpg) + +Input Text: "Please segment the sand in the image and directly generate the output image." + +![](images/f2338a3386d7dafb1d4ed2f0d097545a66aaae303a0ec9c364235ccd56f6fb11.jpg) + +![](images/94ec2cb93bf6619e7cff5d27b701fd16348dc2330513de806c10891f384b214e.jpg) + +![](images/bc7f355d6226e9ae8b3bf01b1ec603dc9a91ef5a37b5df843f5fa2d53f805b40.jpg) + +![](images/54845239cccd77ffed346bbbb01a2390dc01dd6649c038184a108f0d78f6e18b.jpg) + +Input Text: "Please segment the table beside the black sofa in the image and directly generate the output image." + +![](images/40eaf0cc2887a61189056995988a25e411abbb492721dd306939eb435cb2c205.jpg) + +![](images/fd5210b3394235c1477fe979838a108f3de32982a0527224a4d83dc207ae62bc.jpg) + +![](images/900798659953d6bf5da0b4a07b8d732ff649b11f079acb0d0b7f9e3317082e6e.jpg) + +![](images/44a28c987decc43c2c7016a554b05881a1fabeb9cb87f073992406fc3bf4eeb3.jpg) + +Input Text: "Please segment the smallest chair and directly generate the output image." + +![](images/7be4bdc21bc3e0a0da4304091bcaaf819d4c752aa483ba4d25c1add751eaadcc.jpg) +Figure 47: Task: Image to X: Referring expression segmentation. Evaluate the grounding and grouping ability. 
Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Sa2VA [117]. Observation: These examples indicate that current GPT-4o has weak pixel-level grounding ability. + +![](images/ddccd12b79d35e83a67c0ec67ebca9d6ffdf81f90dbdc128897666c54e5ca249.jpg) + +![](images/82c794a5d723f0b36ec6540fc27ae81b30d682d013d9335993d95bcec71b79ae.jpg) + +![](images/cf3e7ef91003317002f53f486a244b5640ca63c14670c2003aa978121ccb908c.jpg) + +Input Text: "Please segment the bag in the image and directly generate the output image." + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +Sa2VA + +![](images/a71db3675a1fc3707882b1ea7573faaee81a29d969a48f7917d231e5c96013a7.jpg) + +# Evaluation: Semantic Segmentation, Shape and Grouping. + +![](images/dfa857e58ea3978f96a70b80835f05da2c3c164ea663a535310bee9d91124824.jpg) + +![](images/bffe05ee07a77a1df7cf6cc2b2d3a0cb232aa99efd565f3002c170b0ea6c49e3.jpg) + +![](images/1fc3e7193935e21d941ba2a69b2a1621e26533675595f4e66648793cc5c521ca.jpg) + +![](images/89990c180c4a599a243294b0904684d0351ed3529a4375d02a3ffb1ea5cc5012.jpg) + +Input Text: "Please generate the semantic segmentation result of the image." + +![](images/fbc284435daba3dd588193455507d1744f38c42e568b7de9d51aacf24568bbd6.jpg) + +![](images/6a5d14299085340af2af8df55c7d78da1c76cf94e7c40072a4aa894710067730.jpg) + +![](images/70b3977d31710e5b25126e9195e5448eee93a4c761b993c52e1db1cb26c59702.jpg) + +![](images/835c092bc8f3a790fdd3ce19ea243873bcd1e8c977f1e3b28cb6589fd8a4f735.jpg) + +Input Text: "Please generate the semantic segmentation result of the image." + +![](images/0759fb223e415e0f19710f612a3c80b614bae80774a18396353bf7b14eb7da5d.jpg) + +![](images/9915d6c0aa3e038b5ec03ebeb8dcca7a1b7496741251e2af39361e947240d627.jpg) + +![](images/07315caab9c8a97608642744aa18c2e35c6920d402732aa3a2675180b0a7af1e.jpg) + +![](images/4bdca3b7ab2d59c6d6c1458affcfac976bc7751cd6959565ea93ddf768f48481.jpg) + +Input Text: "Please generate the semantic segmentation result of the image." + +![](images/1b277bbf258a90079c84ec982452f71ac0f350730f9aea170064b358fe0eb923.jpg) +Figure 48: Task: Image to X: Semantic segmentation. Evaluate the shape and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Deeplab-V3+ [14]. Observation: Compared with Gemin-2.0, the mask quality of GPT-4o is good. However, there are still huge gaps in the standard semantic segmentation format. + +![](images/a6a106419b52434d8a55c2b5fb821d02c748ecc0597904cf146832238e26aadd.jpg) + +![](images/fd3f62eae10787f13ed9ce858671690809ab040be8b604ced0e3044aef5bc8d9.jpg) + +![](images/aa66b3606a91f980129cef7b09a646b9b32aae5937a1a800da9a62b68a314796.jpg) + +Input Text: "Please generate the semantic segmentation result of the image." + +Input Image + +GPT 40 + +Gemini 2.0 Flash + +Deeplab-V3+ + +![](images/9fe5199286f0112238a7073ea1794d37f5662a85af0714537b15349dcc732e6c.jpg) + +# Evaluation: Panoptic Segmentation, Grouping and Shape. + +![](images/dfdea49947535ad053094764e4f1c4f57b87c66ef48041b87f0c1403e8ebfb06.jpg) + +![](images/0d5ac0ba593b14c0ff43464e9d8cb2a78e3c44bedcbbb29c36a51144ad9c8c34.jpg) + +![](images/ae911cf1c44c913e1579fdc6ee25f6acd40fabf3b6bb39cb4fd1df2fd7c49995.jpg) + +![](images/1b3602b90061d565f84e84ed00f2de84147c20f674c0bc022d3fa5b9bd5926a9.jpg) + +Input Text: "Please generate the panoptic segmentation result of the image." 
+

Figure 49: Task: Image to X: Panoptic segmentation. Evaluate the shape and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and K-Net [123]. Observation: GPT-4o can understand the panoptic segmentation task, while Gemini 2.0 Flash cannot do this task in the first and third cases.

# 2.4.2 Edge Detection

Edge Detection. As a classic vision task, edge detection aims to identify the boundaries or edges of objects within an image. These edges correspond to locations with significant changes in image intensity, color, or other visual features. Common edge detection operators include the Sobel, Prewitt, and Canny operators, while recent works adopt deep learning-based approaches.

In Figure 50, we compare this ability against a recent SOTA deep learning-based approach, EDMB [56]. Across the four examples, we find that both GPT-4o and Gemini 2.0 Flash can detect object edges for both foreground and background objects; GPT-4o even captures fine details well. However, we find two critical issues: 1) the spatial localization of GPT-4o's output is changed, as also observed in the segmentation tasks; and 2) the image content is also changed. For example, in the first case, GPT-4o generates a road that does not exist in the input image.
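+

As a point of reference for the classical operators named above, the sketch below runs OpenCV's Canny detector. The input path and thresholds are hypothetical; this is only meant to illustrate what a conventional edge map looks like next to the generative outputs in Figure 50, not the experimental setup used here.

```python
# Minimal sketch: classical Canny edge detection with OpenCV.
# The input path and the threshold values are hypothetical.
import cv2

img = cv2.imread("scene.png")                 # hypothetical input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Canny expects a single channel
gray = cv2.GaussianBlur(gray, (5, 5), 0)      # suppress noise before gradients

# Hysteresis thresholds: gradients above 200 are strong edges; pixels between
# 100 and 200 are kept only if connected to a strong edge.
edges = cv2.Canny(gray, 100, 200)

cv2.imwrite("edges.png", edges)  # white edge pixels on a black background
```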
+

Image Matting. Image matting is a technique in image processing that aims to separate a foreground object from its background and obtain a detailed alpha matte, which indicates the transparency or opacity of each pixel in the foreground. It goes beyond simple segmentation by providing more precise information about the boundaries and fine details of the object, especially for complex objects like hair or smoke.

In Figure 51, we show three test examples, with one expert model, Matting Anything [54], for reference. Compared with Gemini 2.0 Flash, GPT-4o can handle the simple cases, as shown in the third row, so it understands the task goal; for example, it can even keep the fine-grained details of horse hair. However, considering the strict requirements of image matting (fine-grained and well-aligned details), the overall quality is poor. Compared with Matting Anything, both GPT-4o and Gemini 2.0 Flash work poorly, and we find nearly the same issues as before: 1) wrong spatial localization and 2) changed contents.

(Figure 50 panels. Image-to-X. Evaluation: edge detection, shape analysis. Prompt: "Please detect the edge of object in this image and output the final image." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, EDMB.)

Figure 50: Task: Image to X: Edge detection. Evaluate the shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and EDMB [56]. Observation: We find that both GPT-4o and Gemini 2.0 Flash can detect object edges for both foreground and background objects.

(Figure 51 panels. Image-to-X. Evaluation: image matting, grouping and shape. Prompt: "Please Please matting the foreground and remove the background. Please directly generate the output image." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, Matting Anything.)

Figure 51: Task: Image to X: Image matting. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Matting Anything [54]. Observation: Compared with Gemini 2.0 Flash, GPT-4o can handle the simple cases, as shown in the third row. However, considering the strict requirements of image matting (fine-grained and aligned details), the overall quality is poor.
+

# 2.4.3 Salient Object

Salient Object Detection. Salient object detection is a crucial technique in computer vision and image processing. It aims to identify and locate the most visually prominent objects within an image or a video sequence.

In Figure 52, we adopt one expert model, BiRefNet [127], as reference. Across all examples, compared with Gemini 2.0 Flash, GPT-4o can detect the relevant salient objects given the text prompts, while Gemini 2.0 Flash cannot. The second example shows that GPT-4o can generate well-aligned salient masks. However, in the other examples the spatial locations are not preserved; the results appear to be generated from the input image and plausible object classes rather than from grounded regions. In the last example, GPT-4o cannot generate masks for multiple salient objects, which is a further limitation when dealing with multi-object scenes.

Mirror Detection. Mirror detection is a computer vision task that focuses on identifying mirror surfaces within an image or a scene. Previous works explore this direction by adopting visual and geometric cues.

In Figure 53, we also probe this ability for both GPT-4o and Gemini 2.0 Flash, adopting a recent SOTA expert model, VMD [107], for comparison. For simple cases, we find that GPT-4o can carry out mirror detection, as shown in the first example. For the complex scene, it cannot work as well as the expert model, VMD: in the second example, it hallucinates a mirror and produces a wrong output image with a line marking the boundary of the fake mirror. As shown in the last row, GPT-4o treats several rectangular objects as mirrors, leading to several false positives.

Shadow Detection. Shadow detection is a significant process in computer vision and image processing that aims to identify and localize shadow regions in an image or a video. This technique is crucial, as shadows can otherwise disrupt object detection, recognition, and scene analysis.

In Figure 54, we test this ability for GPT-4o, adopting the SOTA model SDDNet [21] for reference. For the simple examples (a single object, or no objects in the image), both GPT-4o and Gemini 2.0 Flash can localize the shadow, as shown in the first two rows. For more complex examples, both models merge the objects and their shadows into one mask output, as shown in the last two rows; thus, GPT-4o cannot handle these cases. In addition, spatial misalignment also occurs in all cases.
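+

The shadow prompts in Figure 54 ask for a strict black-and-white mask, but the models return ordinary RGB images, so any quantitative check first needs to binarize the output. A minimal sketch of that post-processing step follows; the file name and threshold are hypothetical.

```python
# Minimal sketch: binarize a model-returned "mask image" so it can later be
# compared against a ground-truth shadow mask. Path and threshold hypothetical.
import numpy as np
from PIL import Image

out = np.asarray(Image.open("gpt4o_shadow_output.png").convert("L"))

# Pixels brighter than the threshold are treated as "shadow" (white in the
# requested output format); everything else becomes background.
binary = out > 127

coverage = binary.mean()
print(f"Predicted shadow covers {coverage:.1%} of the image")
```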
+

Camouflage Object Detection. Camouflage object detection is a challenging computer vision task. It aims to identify objects that are designed to blend into their backgrounds, making them difficult to distinguish for human eyes or traditional detection methods. It has wide applications in military, security, and wildlife-conservation settings.

As shown in Figure 55, we again include one expert model, BiRefNet [127], for reference. Both GPT-4o and Gemini 2.0 Flash can detect and segment the camouflaged animals in simple cases, as shown in the last two rows. GPT-4o can also detect a specific object given the text prompt, as shown in the first row. However, the same misalignment issues still exist. In addition, GPT-4o mixes output formats (binary masks vs. color masks), as shown in the last row.

(Figure 52 panels. Image-to-X. Evaluation: salient object detection, grouping and shape. Prompt: "Give me the segmentation map of the most salient objects in this image. Return resulting image by using image generation." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, BiRefNet.)

Figure 52: Task: Image to X: Salient object detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and BiRefNet [127]. Observation: Across all examples, GPT-4o can detect the relevant salient objects given the text prompts, while Gemini 2.0 Flash cannot achieve this.
+

(Figure 53 panels. Evaluation: mirror detection, grouping and shape. Prompt: "Please segment all the mirror in the image and directly generate the output image." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, VMD.)

Figure 53: Task: Image to X: Mirror detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and VMD [107]. Observation: For simple cases, we find that GPT-4o can carry out mirror detection, as shown in the first example. For the complex scene, it cannot work as well as VMD.

(Figure 54 panels. Evaluation: shadow detection, grouping and shape. Prompt: "Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. Return the resulting image using image generation." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, SDDNet.)

Figure 54: Task: Image to X: Shadow detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SDDNet [21]. Observation: For more complex examples, both models detect the objects together with their shadows in one mask output, as shown in the last two rows, leading to false positive predictions.
+

(Figure 55 panels. Evaluation: camouflage object detection, grouping and shape. Prompts: "Give me the segmentation map of the {crocodile, fish, toad} in this image. Return resulting image by using image generation." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, BiRefNet.)

Figure 55: Task: Image to X: Camouflage object detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and BiRefNet [127]. Observation: Both GPT-4o and Gemini 2.0 Flash can detect and segment the camouflaged animals in simple cases. However, the spatial misalignments still exist.
+

# 2.4.4 Depth Estimation

The depth estimation task involves predicting the distance from the camera to objects within a scene. In this paper, we focus on monocular depth estimation, which takes a single image as input. In Figure 56, we compare GPT-4o, Gemini 2.0 Flash, and a recent SOTA method, Depth-Anything [114]. We first notice that Gemini 2.0 Flash cannot produce reasonable depth estimates. As for GPT-4o, although it can output an appealing depth map visualization, we point out that this output is only a grayscale visualization of depth and cannot be directly converted to a per-pixel depth value. We show five main cases. In the first test case, we notice that GPT-4o is good at capturing details in images, which Depth-Anything may miss. Although we cannot directly verify the accuracy of the depth values, the visualization suggests that the depth ordering between objects is accurate. What GPT-4o does not handle well is the background: since the background in the image is the sky, common sense tells us these areas are infinitely far from the camera, yet GPT-4o's depth map does not treat them correctly. GPT-4o performs similarly in the second, fourth, and fifth examples. We would emphasize the fourth test case: for distant buildings, GPT-4o cannot effectively resolve the distance between each building and the camera. In the third example, the output of GPT-4o is very confusing, and it completely misunderstands the depth relationships of the entire image. Therefore, we believe the depth estimation performance of GPT-4o is still unstable.
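+

Because GPT-4o returns only a visualization, standard depth metrics cannot be applied to it directly. For methods that do output depth values, a common error measure is the absolute relative error (AbsRel). The sketch below illustrates the computation on hypothetical arrays, with the median-scaling step often used when predictions are only defined up to a global scale.

```python
# Minimal sketch: absolute relative error (AbsRel) between a predicted and a
# ground-truth depth map, with median scaling for scale-ambiguous predictions.
# The arrays are hypothetical stand-ins for real depth maps.
import numpy as np

def abs_rel(pred: np.ndarray, gt: np.ndarray) -> float:
    valid = gt > 0                      # ignore pixels without ground truth
    pred, gt = pred[valid], gt[valid]
    pred = pred * (np.median(gt) / np.median(pred))  # align the global scale
    return float(np.mean(np.abs(pred - gt) / gt))

gt = np.random.uniform(1.0, 10.0, size=(240, 320))
pred = gt * 1.1 + np.random.normal(0, 0.2, size=gt.shape)  # noisy prediction

print(f"AbsRel: {abs_rel(pred, gt):.3f}")  # lower is better
```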
+

(Figure 56 panels. Image-to-X. Evaluation: depth estimation. Prompt: "Please generate the depth map prediction of this image." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, Depth-Anything.)

Figure 56: Task: Image to X: Depth estimation. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Depth-Anything [114]. Observation: We convert the depth map generated by Depth-Anything into a visualization map similar to GPT-4o's. This evaluation shows that GPT-4o is capable of distinguishing the depth relationships of different parts of the image, but its understanding of the background is insufficient.

# 2.4.5 Normal Estimation

The surface normal estimation task involves predicting the orientation of the surface at each pixel in an image, typically represented as a 3D vector. In Figure 57, we compare GPT-4o, Gemini 2.0 Flash, and the normals variant of Marigold [48]. GPT-4o can generate reasonable normal maps. However, since GPT-4o's output is an appealing but color-coded visualization, it does not directly provide the exact normal vector for each pixel; we therefore cannot use lighting or other methods to verify the accuracy of the normal maps, and downstream tasks cannot consume the output. We also find some unreasonable details: in the third test case, common sense suggests that the ground should be flat, but GPT-4o predicts normals for the textured ground areas that differ from the surrounding regions.

(Figure 57 panels. Image-to-X. Evaluation: consistency/accuracy. Prompt: "Generate the surface normal map of this picture." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, Marigold.)

Figure 57: Task: Image to X: Normal estimation. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Marigold [48]. Observation: This evaluation shows that GPT-4o is capable of generating a visualization map of the surface normals, but its understanding of the details is still insufficient.
+

# 2.4.6 Layout Detection

The layout detection task requires the model to identify structural components (e.g., titles, paragraphs, tables, images) in a given image. In Figure 58, we compare the performance of GPT-4o, Gemini 2.0 Flash, and LayoutLMV3 [44] on the layout detection task. In our test cases, GPT-4o hallucinates layout elements that do not exist, even though the final output is another document overlaid with "layout detection" results. For use in downstream tasks, such results are meaningless. We therefore conclude that GPT-4o is not capable of the layout detection task.

(Figure 58 panels. Image-to-X. Evaluation: document detection. Prompt: "Generate a new image which contains the layout detection results of the input image." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, LayoutLMV3.)

Figure 58: Task: Image to X: Layout detection. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and LayoutLMV3 [44]. Observation: The results show that GPT-4o and Gemini 2.0 Flash frequently generate a different document, even when the overlaid layout looks plausible.

# 2.4.7 Text Detection

The text detection task requires the model to detect the text in a given image. In Figure 59, we compare the performance of GPT-4o, Gemini 2.0 Flash [99], and CRAFT [3] on text detection. We observe that CRAFT performs better than the other models.

In the first test case, GPT-4o demonstrates performance comparable to CRAFT. However, in the other cases, GPT-4o repeatedly generates nonexistent text and labels it as a "text area". This issue becomes particularly evident in cluttered scenes or images with complex backgrounds. These false positives not only reduce detection precision but also make the output less reliable for downstream tasks such as OCR or document understanding. Gemini 2.0 Flash, on the other hand, does not generate nonexistent text but tends to over-predict some regions as text areas.
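+

Text detection is normally scored by matching predicted boxes to ground-truth boxes at an IoU threshold and reporting precision and recall, which is exactly where the hallucinated "text areas" above would surface as false positives. The sketch below is a minimal greedy-matching illustration on hypothetical (x1, y1, x2, y2) boxes.

```python
# Minimal sketch: precision/recall for detected text boxes via greedy IoU
# matching. Boxes are (x1, y1, x2, y2); all values are hypothetical.
def box_iou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    union = area(a) + area(b) - inter
    return inter / union if union else 0.0

def precision_recall(pred, gt, thr=0.5):
    matched, tp = set(), 0
    for p in pred:  # greedily match each prediction to its best unused GT box
        best = max(((box_iou(p, g), i) for i, g in enumerate(gt)
                    if i not in matched), default=(0.0, -1))
        if best[0] >= thr:
            matched.add(best[1]); tp += 1
    return tp / len(pred) if pred else 0.0, tp / len(gt) if gt else 0.0

gt = [(10, 10, 100, 30), (10, 50, 120, 70)]
pred = [(12, 12, 98, 28), (200, 200, 260, 220)]  # one hit, one hallucination
print(precision_recall(pred, gt))  # (0.5, 0.5)
```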
+

(Figure 59 panels. Image-to-X. Evaluation: text detection. Prompt: "Generate a new image and label each line of text in the image with a green box". Columns: Input Image, GPT-4o, Gemini 2.0 Flash, CRAFT.)

Figure 59: Task: Image to X: Text detection. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and CRAFT [3]. Observation: The results show that GPT-4o frequently generates text that does not exist.

# 2.4.8 Object Tracking

The object tracking task requires the model to continuously locate and follow specific objects across the frames of a video sequence. We test multi-object tracking, which requires the model to track several objects concurrently, on four cases (Figures 60-63), comparing GPT-4o, Gemini 2.0 Flash, and a recent SOTA method, SAM-2 [86]. Our first observation is that GPT-4o seems unable to generate images that are consistent with the original frame, which may be related to the nature of its generative model. Even setting this aside, SAM-2 still performs better on the tracking task itself, while GPT-4o exhibits problems such as failing to maintain consistent tracking of a target, frequently drifting, or losing the object entirely. In Figure 60, the output of GPT-4o generally demonstrates the ability to track objects, but there are also defects; for example, a new object is created out of the existing objects in the last picture generated by GPT-4o, which we speculate is caused by the conversation context. In Figure 61, GPT-4o outputs content that should not be in the result, such as the "caf" tag. In Figure 62, GPT-4o can track a relatively simple object, but it fuses two separate objects. In Figure 63, GPT-4o lacks the capability of tracking in dense scenarios.
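+

Tracking quality is typically judged per frame by the IoU between each predicted box and the corresponding reference box, with low IoU flagging the drift and loss failures described above. The sketch below is a minimal per-object check on hypothetical box trajectories, reusing a standard IoU definition; the 0.5 threshold is an arbitrary choice for illustration.

```python
# Minimal sketch: flag tracking drift by checking per-frame IoU between a
# predicted and a reference box trajectory. All box values are hypothetical.
def box_iou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    union = ((a[2] - a[0]) * (a[3] - a[1])
             + (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / union if union else 0.0

reference = [(50, 50, 90, 90), (55, 52, 95, 92), (60, 54, 100, 94)]
predicted = [(48, 49, 88, 88), (70, 70, 110, 110), (5, 5, 45, 45)]  # drifts away

for t, (p, r) in enumerate(zip(predicted, reference)):
    iou = box_iou(p, r)
    status = "ok" if iou >= 0.5 else "drift/lost"
    print(f"frame {t}: IoU={iou:.2f} -> {status}")
```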
+

(Figure 60 panels. Evaluation: object tracking, matching, and video analysis. Prompts: "This is the first frame of a video where I've marked four targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these four targets. Understood?"; "You now need to perform object tracking on the four targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation."; "Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation." Columns: Input Image, GPT-4o, Gemini 2.0 Flash, SAM-2.)

Figure 60: Task: Image to X: Object tracking, matching, and video analysis (1/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate an image consistent with the input image.
+

(Figure 61 panels. Evaluation: object tracking, matching, and video analysis. Prompts follow the same three-step template as Figure 60, here for three marked targets. Columns: Input Image, GPT-4o, Gemini 2.0 Flash, SAM-2.)

Figure 61: Task: Image to X: Object tracking, matching, and video analysis (2/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate an image consistent with the input image.

(Figure 62 panels. Evaluation: object tracking, matching, and video analysis. Prompts follow the same template, here for four marked targets. Columns: Input Image, GPT-4o, Gemini 2.0 Flash, SAM-2.)
+

Figure 62: Task: Image to X: Object tracking, matching, and video analysis (3/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate an image consistent with the input image.

(Figure 63 panels. Evaluation: object tracking, matching, and video analysis. Prompts follow the same template, here for six marked targets. Columns: Input Image, GPT-4o, Gemini 2.0 Flash, SAM-2.)

Figure 63: Task: Image to X: Object tracking, matching, and video analysis (4/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate an image consistent with the input image.
+# 3 Limitations
+
+Although GPT-4o demonstrates impressive capabilities across a wide range of image generation tasks, several limitations remain. These challenges highlight key areas for future improvement in developing unified foundation models for vision-language generation.
+
+# 3.1 Inconsistent Generation
+
+While GPT-4o often produces high-quality and semantically relevant images conditioned on textual prompts, it occasionally exhibits inconsistencies. Specifically, the model may generate visually compelling outputs that deviate from precise semantic cues of the input image, such as object count, spatial layout, specific shapes, or designated colors. These inconsistencies are especially problematic in tasks requiring partial image editing or compositional accuracy. Notably, such issues are less common in diffusion-based models or discrete denoising architectures like MaskGIT [11, 6], suggesting that GPT-4o operates under a distinct generative paradigm with inherent trade-offs in fidelity and control.
+
+# 3.2 Hallucination
+
+GPT-4o is also susceptible to hallucinations, producing content that is logically implausible, semantically inconsistent, or factually incorrect. Examples include fabricating non-existent objects or geographical features (e.g., imaginary islands or landmarks) and misrepresenting relationships between entities. Such errors are particularly prevalent for complex or underspecified prompts, where the model appears to rely on internal priors rather than grounded world knowledge. While hallucination is a common challenge across generative models, it poses notable limitations for real-world applications demanding precision, such as education, medical illustration, or scientific visualization.
+
+# 3.3 Data Bias
+
+Despite strong alignment between the text and vision modalities, GPT-4o struggles with data bias: it often fails to generate underrepresented cultural elements and to render non-Latin scripts such as Chinese, Japanese, and Arabic. The generated characters are often incomplete, distorted, or replaced with Latin-like approximations. These artifacts reflect underlying challenges in multilingual representation, likely due to limited exposure to diverse scripts during training and the inherent difficulty of accurate typographic rendering in pixel space. This phenomenon is emblematic of a larger issue in AI systems: data bias.
+The training data used to develop models like GPT-4o may disproportionately represent certain languages, cultures, and writing systems, leading to disparities in performance across different linguistic groups. These biases are not only technical limitations but also ethical concerns, as they can contribute to the exclusion of underrepresented languages and cultures from AI applications. As vision-language models are increasingly deployed globally, improving support for multilingual text remains a crucial step toward inclusive and culturally competent AI systems.
+
+# 4 Conclusion
+
+In conclusion, this work presents a comprehensive study on the development of unified vision-language generative models, with a focus on evaluating GPT-4o across a wide range of image generation tasks. Our analysis shows that GPT-4o demonstrates strong capabilities in aligning vision and language, achieving competitive results across text-to-image, image-to-image, image-to-3D, and image-to-X tasks. However, limitations remain in generation consistency, hallucination, and data bias against underrepresented cultural elements and non-Latin scripts, highlighting current trade-offs in model design and training data coverage. We also emphasize that architecture alone does not determine success; training data, model scale, and optimization strategies are equally critical components of progress. We hope future work will provide deeper empirical insights into such proprietary systems and clarify their position within the broader landscape of unified generative modeling.
+
+# References
+
+[1] Hao Ai, Zidong Cao, Haonan Lu, Chen Chen, Jian Ma, Pengyuan Zhou, Tae-Kyun Kim, Pan Hui, and Lin Wang. Dream360: Diverse and immersive outdoor virtual scene creation via transformer-based 360 image outpainting. IEEE Transactions on Visualization and Computer Graphics, 2024. 34, 42
+[2] Ideogram AI. Ideogram. https://ideogram.ai/, 2024. 10, 11, 12
+[3] Youngmin Baek, Bado Lee, Dongyoon Han, Sangdoo Yun, and Hwalsuk Lee. Character region awareness for text detection. In CVPR, 2019. 78, 79
+[4] Jinbin Bai, Wei Chow, Ling Yang, Xiangtai Li, Juncheng Li, Hanwang Zhang, and Shuicheng Yan. Humanedit: A high-quality human-rewarded dataset for instruction-based image editing. arXiv preprint arXiv:2412.04280, 2024. 21
+[5] Jinbin Bai, Zhen Dong, Aosong Feng, Xiao Zhang, Tian Ye, Kaicheng Zhou, and Mike Zheng Shou. Integrating view conditions for image synthesis. arXiv preprint arXiv:2310.16002, 2023. 21
+[6] Jinbin Bai, Tian Ye, Wei Chow, Enxin Song, Qing-Guo Chen, Xiangtai Li, Zhen Dong, Lei Zhu, and Shuicheng Yan. Meissonic: Revitalizing masked generative transformers for efficient high-resolution text-to-image synthesis. arXiv preprint arXiv:2410.08261, 2024. 5, 85
+[7] Shane Barratt and Rishi Sharma. A note on the inception score. arXiv preprint arXiv:1801.01973, 2018. 1
+[8] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2023. 5
+[9] Manuel Brack, Felix Friedrich, Katharina Kornmeier, Linoy Tsaban, Patrick Schramowski, Kristian Kersting, and Apolinário Passos. Ledits++: Limitless image editing using text-to-image models. 2023. 21, 25
+[10] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. arXiv preprint arXiv:2211.09800, 2022. 21
+[11] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11315-11325, 2022. 85
+[12] Haoyu Chen, Xiaojie Xu, Wenbo Li, Jingjing Ren, Tian Ye, Songhua Liu, Ying-Cong Chen, Lei Zhu, and Xinchao Wang. Posta: A go-to framework for customized artistic poster generation. arXiv preprint arXiv:2503.14908, 2025. 10, 12
+[13] Liang Chen, Shuai Bai, Wenhao Chai, Weichu Xie, Haozhe Zhao, Leon Vinci, Junyang Lin, and Baobao Chang. Multimodal representation alignment for image generation: Text-image interleaved control is easier than you think. arXiv preprint arXiv:2502.20172, 2025. 1
+[14] Liang-Chieh Chen, George Papandreou, Florian Schroff, and Hartwig Adam. Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587, 2017. 62, 64
+[15] Sixiang Chen, Tian Ye, Jinbin Bai, Erkang Chen, Jun Shi, and Lei Zhu. Sparse sampling transformer with uncertainty-driven ranking for unified removal of raindrops and rain streaks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13106-13117, 2023. 34
+[16] Sixiang Chen, Tian Ye, Yun Liu, and Erkang Chen. Snowformer: Context interaction transformer with scale-awareness for single image desnowing. arXiv preprint arXiv:2208.09703, 2022. 34
+[17] Sixiang Chen, Tian Ye, Kai Zhang, Zhaohu Xing, Yunlong Lin, and Lei Zhu. Teaching tailored to talent: Adverse weather restoration via prompt pool and depth-anything constraint. In European Conference on Computer Vision, pages 95–115. Springer, 2024. 34
+[18] Tianqi Chen, Yongfei Liu, Zhendong Wang, Jianbo Yuan, Quanzeng You, Hongxia Yang, and Mingyuan Zhou. Improving in-context learning in diffusion models with visual context-modulated prompts. arXiv preprint arXiv:2312.01408, 2023. 56
+[19] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1
+[20] Marcos V. Conde, Gregor Geigle, and Radu Timofte. Instructir: High-quality image restoration following human instructions. In ECCV, 2024. 34, 35, 36, 37, 38, 39, 40
+[21] Runmin Cong, Yuchen Guan, Jinpeng Chen, Wei Zhang, Yao Zhao, and Sam Kwong. Sddnet: Style-guided dual-layer disentanglement network for shadow detection. In ACM MM, 2023. 69, 72
+[22] Ciprian Corneanu, Raghudeep Gadde, and Aleix M Martinez. Latentpaint: Image inpainting in latent space with diffusion models. In WACV, 2024. 34, 41
+[23] Yingying Deng, Fan Tang, Weiming Dong, Chongyang Ma, Xingjia Pan, Lei Wang, and Changsheng Xu. Stytr2: Image style transfer with transformers. In CVPR, 2022. 18
+[24] Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamllm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499, 2023. 1
+[25] Wei Dong, Han Zhou, Yuqiong Tian, Jingke Sun, Xiaohong Liu, Guangtao Zhai, and Jun Chen. Shadowrefiner: Towards mask-free shadow removal via fast fourier transformer. arXiv preprint arXiv:2406.02559, 2024. 44
+[26] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In Forty-first International Conference on Machine Learning, 2024. 1
+[27] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In Forty-first International Conference on Machine Learning, 2024. 10, 11, 47, 51
+[28] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 12873-12883, 2021. 1
+[29] Aosong Feng, Weikang Qiu, Jinbin Bai, Kaicheng Zhou, Zhen Dong, Xiao Zhang, Rex Ying, and Leandros Tassiulas. An item is worth a prompt: Versatile image editing with disentangled control. arXiv preprint arXiv:2403.04880, 2024. 21
+[30] Tsu-Jui Fu, Wenze Hu, Xianzhi Du, William Yang Wang, Yinfei Yang, and Zhe Gan. Guiding instruction-based image editing via multimodal large language models. ICLR, 2024. 21, 22, 23, 24
+[31] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. ICLR, 2023. 28
+[32] Jun Gao, Tianchang Shen, Zian Wang, Wenzheng Chen, Kangxue Yin, Daiqing Li, Or Litany, Zan Gojcic, and Sanja Fidler. Get3d: A generative model of high quality 3d textured shapes learned from images. NeurIPS, 2022. 58
+[33] Leon A Gatys, Alexander S Ecker, and Matthias Bethge. Image style transfer using convolutional neural networks. CVPR, 2016. 18
+[34] Yuying Ge, Sijie Zhao, Jinguo Zhu, Yixiao Ge, Kun Yi, Lin Song, Chen Li, Xiaohan Ding, and Ying Shan. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024. 1
+[35] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 63(11):139–144, 2020. 1
+[36] Yuchao Gu, Xintao Wang, Jay Zhangjie Wu, Yujun Shi, Yunpeng Chen, Zihan Fan, Wuyou Xiao, Rui Zhao, Shuning Chang, Weijia Wu, et al. Mix-of-show: Decentralized low-rank adaptation for multi-concept customization of diffusion models. In NeurIPS, 2024. 28
+[37] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. NeurIPS, 2017. 1
+[38] Qibin Hou, Yuying Ge, Jing Zhang, Yuchao Dai, and Ming-Ming Cheng. Storydiffusion: Consistent self-attention for long-range image and video generation. In Advances in Neural Information Processing Systems (NeurIPS), 2024. 31, 32
+[39] Qiming Hu, Hainuo Wang, and Xiaojie Guo. Single image reflection separation via dual-stream interactive transformers. Advances in Neural Information Processing Systems, 37:55228-55248, 2024. 45
+[40] Jiancheng Huang, Yi Huang, Jianzhuang Liu, Donghao Zhou, Yifan Liu, and Shifeng Chen. Dual-schedule inversion: Training- and tuning-free inversion for real image editing. arXiv preprint arXiv:2412.11152, 2024. 21
+[41] Kaiyi Huang, Chengqi Duan, Kaiyue Sun, Enze Xie, Zhenguo Li, and Xihui Liu. T2i-compbench++: An enhanced and comprehensive benchmark for compositional text-to-image generation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2025. 5
+[42] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Yupeng Shi, Huanzhang Dou, Chen Liang, Yutong Feng, Yu Liu, and Jingren Zhou. In-context lora for diffusion transformers. arXiv preprint arXiv:2410.23775, 2024. 56
+[43] Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In ICCV, 2017. 18
+[44] Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, and Furu Wei. Layoutlmv3: Pre-training for document ai with unified text and image masking. In ACM MM, 2022. 77
+[45] Zixuan Huang, Stefan Stojanov, Anh Thai, Varun Jampani, and James M Rehg. Planes vs. chairs: Category-guided 3d shape learning without any 3d cues. In ECCV, 2022. 58
+[46] Jiaxiu Jiang, Yabo Zhang, Kailai Feng, Xiaohe Wu, Wenbo Li, Renjing Pei, Fan Li, and Wangmeng Zuo. Mc2: Multi-concept guidance for customized multi-concept generation. arXiv preprint arXiv:2404.05268, 2024. 28
+[47] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In ECCV, 2016. 18
+[48] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 76
+[49] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In ICCV, 2023. 56
+[50] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In CVPR, 2023. 28
+[51] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024. 2, 5, 8, 9, 10, 11, 47, 48, 49
+[52] Bolin Lai, Felix Juefei-Xu, Miao Liu, Xiaoliang Dai, Nikhil Mehta, Chenguang Zhu, Zeyi Huang, James M Rehg, Sang-min Lee, Ning Zhang, et al. Unleashing in-context learning of autoregressive models for few-shot image manipulation. arXiv preprint arXiv:2412.01027, 2024. 56
+[53] Jiachen Li, Jitesh Jain, and Humphrey Shi. Matting anything. arXiv:2306.05399, 2023. 66
+[54] Jiachen Li, Jitesh Jain, and Humphrey Shi. Matting anything. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1775–1785, 2024. 68
+[55] Junyi Li, Zhilu Zhang, Xiaoyu Liu, Chaoyu Feng, Xiaotao Wang, Lei Lei, and Wangmeng Zuo. Spatially adaptive self-supervised learning for real-world image denoising. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2023. 34
+[56] Yachuan Li, Xavier Soria Poma, Yun Bai, Qian Xiao, Chaozhi Yang, Guanlin Li, and Zongmin Li. Edmb: Edge detector with mamba. arXiv preprint arXiv:2501.04846, 2025. 66, 67
+[57] Yijun Li, Chen Fang, Jimei Yang, Zhaowen Wang, Xin Lu, and Ming-Hsuan Yang. Universal style transfer via feature transforms. In NIPS, 2017. 18
+[58] Zijie Li, Henry Li, Yichun Shi, Amir Barati Farimani, Yuval Kluger, Linjie Yang, and Peng Wang. Dual diffusion for unified image generation and understanding. arXiv preprint arXiv:2501.00289, 2024. 2
+[59] Zhexin Liang, Zhaochen Li, Shangchen Zhou, Chongyi Li, and Chen Change Loy. Control color: Multimodal diffusion-based interactive image colorization. arXiv preprint arXiv:2402.10855, 2024. 34, 43
+[60] Xin Lin, Chao Ren, Kelvin CK Chan, Lu Qi, Jinshan Pan, and Ming-Hsuan Yang. Multi-task image restoration guided by robust dino features. arXiv preprint arXiv:2312.01677, 2023. 34
+[61] Xin Lin, Chao Ren, and Xiao Liu. Unsupervised image denoising in real-world scenarios via self-collaboration parallel generative adversarial branches. In ICCV, 2023. 34
+[62] Xin Lin, Jingtong Yue, Sixian Ding, Chao Ren, Lu Qi, and Ming-Hsuan Yang. Dual degradation representation for joint deraining and low-light enhancement in the dark. IEEE Transactions on Circuits and Systems for Video Technology, 2024. 34
+[63] Xin Lin, Yuyan Zhou, Jingtong Yue, Chao Ren, Kelvin CK Chan, Lu Qi, and Ming-Hsuan Yang. Re-boosting self-collaboration parallel prompt gan for unsupervised image restoration. arXiv preprint arXiv:2408.09241, 2024. 34
+[64] Bingchen Liu, Ehsan Akhgari, Alexander Visheratin, Aleks Kamko, Linmiao Xu, Shivam Shrirao, Chase Lambert, Joao Souza, Suhail Doshi, and Daiqing Li. Playground v3: Improving text-to-image alignment with deep-fusion large language models. arXiv preprint arXiv:2409.10695, 2024. 10, 12, 14
+[65] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining, 2024. 1
+[66] Haipeng Liu, Yang Wang, Biao Qian, Meng Wang, and Yong Rui. Structure matters: Tackling the semantic discrepancy in diffusion models for image inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 34, 42
+[67] Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024. 1
+[68] Minghua Liu, Chao Xu, Haian Jin, Linghao Chen, Mukund Varma T, Zexiang Xu, and Hao Su. One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization. Advances in Neural Information Processing Systems, 2023. 58
+[69] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proceedings of the IEEE/CVF international conference on computer vision, 2023. 58
+[70] Xiaoxiao Long, Yuan-Chen Guo, Cheng Lin, Yuan Liu, Zhiyang Dou, Lingjie Liu, Yuexin Ma, Song-Hai Zhang, Marc Habermann, Christian Theobalt, et al. Wonder3d: Single image to 3d using cross-domain diffusion. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2024. 58
+[71] Aaron Lou, Chenlin Meng, and Stefano Ermon. Discrete diffusion modeling by estimating the ratios of the data distribution. arXiv preprint arXiv:2310.16834, 2023. 2
+[72] Yiyang Ma, Xingchao Liu, Xiaokang Chen, Wen Liu, Chengyue Wu, Zhiyu Wu, Zizheng Pan, Zhenda Xie, Haowei Zhang, Liang Zhao, et al. Janusflow: Harmonizing autoregression and rectified flow for unified multimodal understanding and generation. arXiv preprint arXiv:2411.07975, 2024. 2
+[73] Chenlin Meng, Kristy Choi, Jiaming Song, and Stefano Ermon. Concrete score matching: Generalized score matching for discrete data. Advances in Neural Information Processing Systems, 35:34532-34545, 2022. 2
+[74] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019. 58
+[75] Midjourney. Midjourney. https://www.midjourney.com, 2024. 2, 6, 7, 18, 19, 20, 59, 60, 61
+[76] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 2021. 58
+[77] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2020. 58
+[78] OpenAI. Addendum to gpt-4o system card: 4o image generation, 2025. Accessed: 2025-04-02. 2
+[79] Junyi Pan, Xiaoguang Han, Weikai Chen, Jiapeng Tang, and Kui Jia. Deep mesh reconstruction from single rgb images via topology modification networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019. 58
+[80] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3d hands, face, and body from a single image. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019. 58
+[81] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 5
+[82] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: Improving latent diffusion models for high-resolution image synthesis. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 1, 47, 50
+[83] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3d object generation using both 2d and 3d diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 58
+[84] Chu-Jie Qin, Rui-Qi Wu, Zikun Liu, Xin Lin, Chun-Le Guo, Hyun Hee Park, and Chongyi Li. Restore anything with masks: Leveraging mask image modeling for blind all-in-one image restoration. In ECCV, 2024. 34
+[85] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 5
+[86] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. SAM 2: Segment anything in images and videos. ICLR, 2025. 80, 81, 82, 83, 84
+[87] Jeremy Reizenstein, Roman Shapovalov, Philipp Henzler, Luca Sbordone, Patrick Labatut, and David Novotny. Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In ICCV, 2021. 58
+[88] Bin Ren, Yawei Li, Nancy Mehta, and Radu Timofte. The ninth NTIRE 2024 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 34
+[89] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 10684-10695, 2022. 1
+[90] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, June 2022. 47, 52
+[91] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023. 28
+[92] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 2022. 5
+[93] Subham Sahoo, Marianne Arriola, Yair Schiff, Aaron Gokaslan, Edgar Marroquin, Justin Chiu, Alexander Rush, and Volodymyr Kuleshov. Simple and effective masked diffusion language models. Advances in Neural Information Processing Systems, 37:130136-130184, 2024. 2
+[94] Qingyu Shi, Lu Qi, Jianzong Wu, Jinbin Bai, Jingbo Wang, Yunhai Tong, Xiangtai Li, and Ming-Hsuan Yang. Relationbooth: Towards relation-aware customized object generation. arXiv preprint arXiv:2410.23280, 2024. 28
+[95] Haoze Sun, Wenbo Li, Jianzhuang Liu, Haoyu Chen, Renjing Pei, Xueyi Zou, Youliang Yan, and Yujiu Yang. Coser: Bridging image and language for cognitive super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 25868-25878, 2024. 34
+[96] Quan Sun, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, Yueze Wang, Hongcheng Gao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative pretraining in multimodality. arXiv preprint arXiv:2307.05222, 2023. 1
+[97] Alexander Swerdlow, Mihir Prabhudesai, Siddharth Gandhi, Deepak Pathak, and Katerina Fragkiadaki. Unified multimodal discrete diffusion. arXiv preprint arXiv:2503.20853, 2025. 2
+[98] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 1
+[99] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 2, 3, 5, 6, 7, 8, 9, 10, 12, 14, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 59, 60, 61, 63, 64, 65, 67, 68, 70, 71, 72, 73, 75, 76, 77, 78, 79, 81, 82, 83, 84
+[100] Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024. 1
+[101] Chunwei Wang, Guansong Lu, Junwei Yang, Runhui Huang, Jianhua Han, Lu Hou, Wei Zhang, and Hang Xu. Illume: Illuminating your llms to see, draw, and self-enhance. arXiv preprint arXiv:2412.06673, 2024. 1
+[102] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In Proceedings of the European conference on computer vision (ECCV), 2018. 58
+[103] Xierui Wang, Siming Fu, Qihan Huang, Wanggui He, and Hao Jiang. Ms-diffusion: Multi-subject zero-shot image personalization with layout guidance. arXiv preprint arXiv:2406.07209, 2024. 28, 30
+[104] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 1
+[105] Zhendong Wang, Yifan Jiang, Yadong Lu, Pengcheng He, Weizhu Chen, Zhangyang Wang, Mingyuan Zhou, et al. In-context learning unlocked for diffusion models. NeurIPS, 2023. 56
+[106] Zhenyu Wang, Aoxue Li, Zhenguo Li, and Xihui Liu. Genartist: Multimodal llm as an agent for unified image generation and editing. NeurIPS, 2024. 5
+[107] Alex Warren, Ke Xu, Jiaying Lin, Gary KL Tam, and Rynson WH Lau. Effective video mirror detection with inconsistent motion cues. In CVPR, 2024. 69, 71
+[108] Jianzong Wu, Chao Tang, Jingbo Wang, Yanhong Zeng, Xiangtai Li, and Yunhai Tong. Diffsensei: Bridging multi-modal llms and diffusion models for customized manga generation. CVPR, 2025. 31, 33
+[109] Size Wu, Wenwei Zhang, Lumin Xu, Sheng Jin, Zhonghua Wu, Qingyi Tao, Wentao Liu, Wei Li, and Chen Change Loy. Harmonizing visual representations for unified multimodal understanding and generation. arXiv preprint arXiv:2503.21979, 2025. 1
+[110] Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. arXiv preprint arXiv:2409.04429, 2024. 1
+[111] Yifan Xia, Yuying Ge, Jing Zhang, Yuchao Dai, and Ming-Ming Cheng. Seed-story: Multimodal long story generation with large language model. arXiv preprint arXiv:2407.08683, 2024. 31, 32
+[112] Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and Zheng Liu. Omnigen: Unified image generation. arXiv preprint arXiv:2409.11340, 2024. 2
+[113] Jiale Xu, Weihao Cheng, Yiming Gao, Xintao Wang, Shenghua Gao, and Ying Shan. Instantmesh: Efficient 3d mesh generation from a single image with sparse-view large reconstruction models. arXiv preprint arXiv:2404.07191, 2024. 58
+[114] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10371-10381, 2024. 74, 75
+[115] Ling Yang, Zhaochen Yu, Chenlin Meng, Minkai Xu, Stefano Ermon, and Bin Cui. Mastering text-to-image diffusion: Recaptioning, planning, and generating with multimodal llms. In ICML, 2024. 5
+[116] Hang Yu, Ruilin Li, Shaorong Xie, and Jiayan Qiu. Shadow-eligible image outpainting. In CVPR, 2024. 34, 42
+[117] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv, 2025. 62, 63
+[118] Yu Yuan, Xijun Wang, Yichen Sheng, Prateek Chennuri, Xingguang Zhang, and Stanley Chan. Generative photography: Scene-consistent camera control for realistic text-to-image synthesis. arXiv preprint arXiv:2412.02168, 2024. 53, 54, 55
+[119] Cheng Zhang, Qianyi Wu, Camilo Cruz Gambardella, Xiaoshui Huang, Dinh Phung, Wanli Ouyang, and Jianfei Cai. Taming stable diffusion for text to $360^{\circ}$ panorama image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 17
+[120] Kai Zhang, Lingbo Mo, Wenhu Chen, Huan Sun, and Yu Su. Magicbrush: A manually annotated dataset for instruction-guided image editing. In NeurIPS, 2023. 21, 25, 26, 27
+[121] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models, 2023. 47
+[122] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Scaling in-the-wild training for diffusion-based illumination harmonization and editing by imposing consistent light transport. In ICLR, 2025. 34, 46
+[123] Wenwei Zhang, Jiangmiao Pang, Kai Chen, and Chen Change Loy. K-net: Towards unified image segmentation. Advances in Neural Information Processing Systems, 34:10326-10338, 2021. 62, 65
+[124] Xinchen Zhang, Ling Yang, Guohao Li, Yaqi Cai, Jiake Xie, Yong Tang, Yujiu Yang, Mengdi Wang, and Bin Cui. Itercomp: Iterative composition-aware feedback learning from model gallery for text-to-image generation. arXiv preprint arXiv:2410.07171, 2024. 5
+[125] Yuxuan Zhang, Yiren Song, Jiaming Liu, Rui Wang, Jinpeng Yu, Hao Tang, Huaxia Li, Xu Tang, Yao Hu, Han Pan, et al. Ssr-encoder: Encoding selective subject representation for subject-driven generation. In CVPR, 2024. 28
+[126] Chuyang Zhao, Yuxing Song, Wenhao Wang, Haocheng Feng, Errui Ding, Yifan Sun, Xinyan Xiao, and Jingdong Wang. Monoformer: One transformer for both diffusion and autoregression. arXiv preprint arXiv:2409.16280, 2024. 2
+[127] Peng Zheng, Dehong Gao, Deng-Ping Fan, Li Liu, Jorma Laaksonen, Wanli Ouyang, and Nicu Sebe. Bilateral reference for high-resolution dichotomous image segmentation. CAAI Artificial Intelligence Research, 2024. 69, 70, 73
+[128] Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024. 2
+[129] Donghao Zhou, Jiancheng Huang, Jinbin Bai, Jiaze Wang, Hao Chen, Guangyong Chen, Xiaowei Hu, and Pheng-Ann Heng. MagicTailor: Component-controllable personalization in text-to-image diffusion models. arXiv preprint arXiv:2410.13370, 2024. 28
+[130] Zhiyu Zhu, Yingcong Chen, Zhenyu Xie, and Jingyi Yu. Disenvisioner: Disentangled and enriched visual prompt for customized image generation. arXiv preprint arXiv:2410.02067, 2024. 28, 29
+[131] Silvia Zuffi, Angjoo Kanazawa, and Michael J Black. Lions and tigers and bears: Capturing non-rigid, 3d, articulated shape from images. In CVPR, 2018.
58 \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05979/images/001d30c3201ae2f4664bf8ee499b66e8df811fc307c2c93884ed70982fa958ca.jpg b/data/2025/2504_05xxx/2504.05979/images/001d30c3201ae2f4664bf8ee499b66e8df811fc307c2c93884ed70982fa958ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc2a88b321d883e38ca1856791ffa48608bfa2dd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/001d30c3201ae2f4664bf8ee499b66e8df811fc307c2c93884ed70982fa958ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f0a5c05de62beba3d4834eb7d55131a765efdbefbcc6ef3dcdde9b78515d747 +size 10423 diff --git a/data/2025/2504_05xxx/2504.05979/images/00b435bd1d8026aa4f0d08be8a9579479f625012cae88f49f81a5d958cf5c199.jpg b/data/2025/2504_05xxx/2504.05979/images/00b435bd1d8026aa4f0d08be8a9579479f625012cae88f49f81a5d958cf5c199.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6697c8f9c8b88f888963709aff1dc1dd50e88c39 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/00b435bd1d8026aa4f0d08be8a9579479f625012cae88f49f81a5d958cf5c199.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52b2b0ea17e418ed217a61d20f93d1ea629af13b3d7c8533da914a0220534cd7 +size 9582 diff --git a/data/2025/2504_05xxx/2504.05979/images/00cde007c7ba440020df1e4e47694ace493a34ef86885baa498937144d1106a2.jpg b/data/2025/2504_05xxx/2504.05979/images/00cde007c7ba440020df1e4e47694ace493a34ef86885baa498937144d1106a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac634ea46a1489ca849e3dcf85cad1883743cbb6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/00cde007c7ba440020df1e4e47694ace493a34ef86885baa498937144d1106a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6471590da564eeb776ec49961be9c4255137682487912c007426f3794c417d1 +size 13642 diff --git a/data/2025/2504_05xxx/2504.05979/images/013887f45aed224843b4df7ea2aba71451637e932d1143fbfcacbd27271f65f3.jpg b/data/2025/2504_05xxx/2504.05979/images/013887f45aed224843b4df7ea2aba71451637e932d1143fbfcacbd27271f65f3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0470dcce48120cb7b9dacd96d60580eeb228ccd6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/013887f45aed224843b4df7ea2aba71451637e932d1143fbfcacbd27271f65f3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35af6e52b7c26b512bdf34540264c325b6f1a80e7136316aa1a83d3bf228edf9 +size 13766 diff --git a/data/2025/2504_05xxx/2504.05979/images/01c729abfdd7ba1065b518b5f8fac38df17a322d899f16a92e21606ffe75be31.jpg b/data/2025/2504_05xxx/2504.05979/images/01c729abfdd7ba1065b518b5f8fac38df17a322d899f16a92e21606ffe75be31.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be811b7df1607e9f95683ba350022766d41ee739 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/01c729abfdd7ba1065b518b5f8fac38df17a322d899f16a92e21606ffe75be31.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:353faf5f0f34c22e3ad45156d8b651e319deff9e912eed3afcb2d1b46effd61f +size 17695 diff --git a/data/2025/2504_05xxx/2504.05979/images/02202d497506f20a18941658d169deee92ee7a8b17e62ab0a67e4f38d7fc3f81.jpg b/data/2025/2504_05xxx/2504.05979/images/02202d497506f20a18941658d169deee92ee7a8b17e62ab0a67e4f38d7fc3f81.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0edd1f1e0b7d26a1130c576919fbc5f9616a42aa --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/02202d497506f20a18941658d169deee92ee7a8b17e62ab0a67e4f38d7fc3f81.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4f6b4393817d986f895d6d499e18c3c755df3632f7fd8c94ee1de545e7605c8 +size 7289 diff --git a/data/2025/2504_05xxx/2504.05979/images/023c34de6dd441f99db1b377780b4fdcd6666f8679ab43c519b68c515e5d0add.jpg b/data/2025/2504_05xxx/2504.05979/images/023c34de6dd441f99db1b377780b4fdcd6666f8679ab43c519b68c515e5d0add.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f1425e463695b179154e5559cd102a59686d79bb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/023c34de6dd441f99db1b377780b4fdcd6666f8679ab43c519b68c515e5d0add.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5877fdb0dd17a5b4db55ca72c0f934df6666b9f48e7f24ea76f8a890e34a6ce2 +size 14382 diff --git a/data/2025/2504_05xxx/2504.05979/images/028da69b538ec977850baf21c5611c81d88bf976beeeeeccb70474beba17944b.jpg b/data/2025/2504_05xxx/2504.05979/images/028da69b538ec977850baf21c5611c81d88bf976beeeeeccb70474beba17944b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a654842af02308a45a1e46a11fa31c372888ada --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/028da69b538ec977850baf21c5611c81d88bf976beeeeeccb70474beba17944b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17d6070578641696f1059f22eecf31687146285c3a8e30a3fb69c8dd61b7b0ca +size 19721 diff --git a/data/2025/2504_05xxx/2504.05979/images/032dd6d95007fdd7dba51b274352d33e5c899e1f0a93a08947da88f005ed8904.jpg b/data/2025/2504_05xxx/2504.05979/images/032dd6d95007fdd7dba51b274352d33e5c899e1f0a93a08947da88f005ed8904.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e4739c60e7f9bcea7d9656ea6a3d447482266ae6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/032dd6d95007fdd7dba51b274352d33e5c899e1f0a93a08947da88f005ed8904.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:826a7e47dd78bf7d1ebdd57fc98291cd4342f4637c6d955360b99724a0f87c16 +size 7016 diff --git a/data/2025/2504_05xxx/2504.05979/images/033fe2dbc7534a41ab6e0d52eae6d42fbd80b6b350bb3fbca8b041c43fc41b40.jpg b/data/2025/2504_05xxx/2504.05979/images/033fe2dbc7534a41ab6e0d52eae6d42fbd80b6b350bb3fbca8b041c43fc41b40.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a353f1c2b592eee42bf5d40d4a3cc25f43b4f0a2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/033fe2dbc7534a41ab6e0d52eae6d42fbd80b6b350bb3fbca8b041c43fc41b40.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a886159bb8d465823d68df6c60237c6e622f5c54ca512b398a380b11b1890fe +size 13947 diff --git a/data/2025/2504_05xxx/2504.05979/images/038bd8bcbb952c2d249e6b969e11edf6c615ea7d80b89e647965ce83d7b12973.jpg b/data/2025/2504_05xxx/2504.05979/images/038bd8bcbb952c2d249e6b969e11edf6c615ea7d80b89e647965ce83d7b12973.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dadffc103965ea3866cc30fee798309e6760da0f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/038bd8bcbb952c2d249e6b969e11edf6c615ea7d80b89e647965ce83d7b12973.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c62a0e0fec078f6507aab73e4da562daa111c4971a0bec208d6804b21dc8e68 +size 13895 diff --git a/data/2025/2504_05xxx/2504.05979/images/03ac5a08165418ccecfd0363ad1b72667a075da1b20d2f84dff1cf04027bdc79.jpg 
b/data/2025/2504_05xxx/2504.05979/images/03ac5a08165418ccecfd0363ad1b72667a075da1b20d2f84dff1cf04027bdc79.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f556cf2378b62ee76adb6ca2ab8954a9fa441a9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/03ac5a08165418ccecfd0363ad1b72667a075da1b20d2f84dff1cf04027bdc79.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d3e0d32b15355325689f1c3f363876102cdec6950532a1a5dadd95570844f18 +size 9061 diff --git a/data/2025/2504_05xxx/2504.05979/images/03c4e2f2110cca390cec16e188035acf44295367b91027a771e4e53dedaf79e1.jpg b/data/2025/2504_05xxx/2504.05979/images/03c4e2f2110cca390cec16e188035acf44295367b91027a771e4e53dedaf79e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26bcd8625adc61dcb2a212ac9b6f9c1c82386a85 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/03c4e2f2110cca390cec16e188035acf44295367b91027a771e4e53dedaf79e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e51fd4954731cdf35ffc4ec4eb8098b771073cdd8b8172475385f0e93e42b943 +size 6816 diff --git a/data/2025/2504_05xxx/2504.05979/images/03db7b6af0e509243f783b8ce2d6a7d6f8244e6d1c3cca245f733119d0adc4d8.jpg b/data/2025/2504_05xxx/2504.05979/images/03db7b6af0e509243f783b8ce2d6a7d6f8244e6d1c3cca245f733119d0adc4d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..851fe4f6fe1926e5d93ab9c70cc2dc606bfd64ff --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/03db7b6af0e509243f783b8ce2d6a7d6f8244e6d1c3cca245f733119d0adc4d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:902070cfe74b7d9f621127c68d5618171a36960d8db2e466693a08412391586f +size 4966 diff --git a/data/2025/2504_05xxx/2504.05979/images/0419b8779eb79040a7d1330971acc7342fec2dd90c7626958522a6fb5f0fcf8e.jpg b/data/2025/2504_05xxx/2504.05979/images/0419b8779eb79040a7d1330971acc7342fec2dd90c7626958522a6fb5f0fcf8e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..12ed179e8e5e9811d5bd6ea895e4dd299bf31c74 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/0419b8779eb79040a7d1330971acc7342fec2dd90c7626958522a6fb5f0fcf8e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e34294b3218fb5d1ac80114c2c40d71668e46540799194a27b79d668deab564a +size 3511 diff --git a/data/2025/2504_05xxx/2504.05979/images/042e2b1b154968fc9607cbadf6e30a37999a81fe9a06bc6f75a82aeaafc4d979.jpg b/data/2025/2504_05xxx/2504.05979/images/042e2b1b154968fc9607cbadf6e30a37999a81fe9a06bc6f75a82aeaafc4d979.jpg new file mode 100644 index 0000000000000000000000000000000000000000..328ed9f6dad2213e5726736afb774a93e9fd888b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/042e2b1b154968fc9607cbadf6e30a37999a81fe9a06bc6f75a82aeaafc4d979.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36531e3793ffb729d47304d46a76cb9dee62a593b612a186b8f836d1e742bdce +size 17493 diff --git a/data/2025/2504_05xxx/2504.05979/images/0459972536685fbb8896000173723ed46bcf8309ab5dd8cd3867ab99d79da23a.jpg b/data/2025/2504_05xxx/2504.05979/images/0459972536685fbb8896000173723ed46bcf8309ab5dd8cd3867ab99d79da23a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..115eb3a9ed601a106719dbf19ea4565083a0b177 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/0459972536685fbb8896000173723ed46bcf8309ab5dd8cd3867ab99d79da23a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:729ffbac63b542183bd82b9cd0af6084efc31be163d48da6610b705f69665063 +size 7941 diff --git a/data/2025/2504_05xxx/2504.05979/images/04a1ddeb73159d510035c4040bc6dea5a7b55b6b32df8201b3f02a9a20621b00.jpg b/data/2025/2504_05xxx/2504.05979/images/04a1ddeb73159d510035c4040bc6dea5a7b55b6b32df8201b3f02a9a20621b00.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c21e478dd848d5228793c7e990410dfcf00acb96 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/04a1ddeb73159d510035c4040bc6dea5a7b55b6b32df8201b3f02a9a20621b00.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ba27cd77f7483085f76dc28363054613382aa55e99f6fef9cf1b9c19e52b7fb +size 11773 diff --git a/data/2025/2504_05xxx/2504.05979/images/04d61817b36efe3075ecd5a5ab06c82b578abc68918b2566f24ba1ac9e539e1b.jpg b/data/2025/2504_05xxx/2504.05979/images/04d61817b36efe3075ecd5a5ab06c82b578abc68918b2566f24ba1ac9e539e1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6528981c8d8649afe7bde1e393d3f10d4271d892 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/04d61817b36efe3075ecd5a5ab06c82b578abc68918b2566f24ba1ac9e539e1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f0cd3ffdbe65d479778c81fada263776e83307b3e508f3ac4bae263b14dee67 +size 5441 diff --git a/data/2025/2504_05xxx/2504.05979/images/05076e4a3056272c3ce291b647bd2b2098f8bef74c7807b41ab1d48af70b1f0e.jpg b/data/2025/2504_05xxx/2504.05979/images/05076e4a3056272c3ce291b647bd2b2098f8bef74c7807b41ab1d48af70b1f0e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c83cb70c1ed5530886d50d12f4db8b19c13da045 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/05076e4a3056272c3ce291b647bd2b2098f8bef74c7807b41ab1d48af70b1f0e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b83c3381dd1b586b3130a7bfbd6e3989101db6b86fbd9d27f914734e6b4b02c3 +size 10341 diff --git a/data/2025/2504_05xxx/2504.05979/images/05f1f17e9903bfc312375e98f666c3fae0cca18b6d2b7c60fe1605e04fefa3d0.jpg b/data/2025/2504_05xxx/2504.05979/images/05f1f17e9903bfc312375e98f666c3fae0cca18b6d2b7c60fe1605e04fefa3d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea2ba5cbe80e95fc548e5fb7755a045a771b2a77 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/05f1f17e9903bfc312375e98f666c3fae0cca18b6d2b7c60fe1605e04fefa3d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8286e910eaeb1dd97c1b4de56f70e28ad87e1056c6981fe355fe5d59be82c468 +size 19464 diff --git a/data/2025/2504_05xxx/2504.05979/images/05f5665f542cca655067ebe6c202394a79f20e88be3af6125a418ad1ddb38552.jpg b/data/2025/2504_05xxx/2504.05979/images/05f5665f542cca655067ebe6c202394a79f20e88be3af6125a418ad1ddb38552.jpg new file mode 100644 index 0000000000000000000000000000000000000000..675d0679af1bf363da547839fea8bca866b1f8c3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/05f5665f542cca655067ebe6c202394a79f20e88be3af6125a418ad1ddb38552.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af48a9572aff18c971213d9f6f477b2846c754bfddaab33aa5ab03ed58a40cff +size 13733 diff --git a/data/2025/2504_05xxx/2504.05979/images/0602a903854b76208c57b28e4349dae82df8b2aa2e65f275df6b6940b8ec8061.jpg b/data/2025/2504_05xxx/2504.05979/images/0602a903854b76208c57b28e4349dae82df8b2aa2e65f275df6b6940b8ec8061.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83784925eec2904399abc45542927e54dfdcb5b1 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/0602a903854b76208c57b28e4349dae82df8b2aa2e65f275df6b6940b8ec8061.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a00654fb7740062cafa5fb1effda0a15ed7fc7fba1ea7199ed22b15272c12e53 +size 5046 diff --git a/data/2025/2504_05xxx/2504.05979/images/0673e0b100dc90c3154a0f84f46af572d02b7c0ce4f3ef16fade43705be3a169.jpg b/data/2025/2504_05xxx/2504.05979/images/0673e0b100dc90c3154a0f84f46af572d02b7c0ce4f3ef16fade43705be3a169.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca6825f171464f694390469845c7fd21243281ab --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/0673e0b100dc90c3154a0f84f46af572d02b7c0ce4f3ef16fade43705be3a169.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0d5a915415068577e528fc224d879eb28910a6cb6632957f3433297b9e80cbd +size 11317 diff --git a/data/2025/2504_05xxx/2504.05979/images/06b68c2bd1d1e1e567e71454157f92909dcb91789d0b950ba4f61dd09198910d.jpg b/data/2025/2504_05xxx/2504.05979/images/06b68c2bd1d1e1e567e71454157f92909dcb91789d0b950ba4f61dd09198910d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..22030e0ea1f2d3822f2269aee0d751a46c565f72 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/06b68c2bd1d1e1e567e71454157f92909dcb91789d0b950ba4f61dd09198910d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d93f2c67c4795bbe367a77238764cd85318f0c699345d20ad564bb9e82bf0cfa +size 9758 diff --git a/data/2025/2504_05xxx/2504.05979/images/06dfd66ade1356c7cd3d3f8e966e4e534aad24c5c887a40d0ac06dfd803d92ab.jpg b/data/2025/2504_05xxx/2504.05979/images/06dfd66ade1356c7cd3d3f8e966e4e534aad24c5c887a40d0ac06dfd803d92ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2248ab304d8c92e6968d534c18292b4bcfac0b9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/06dfd66ade1356c7cd3d3f8e966e4e534aad24c5c887a40d0ac06dfd803d92ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b004d6e96d0ceab76efb0d2ed7afa25a69555e03b17943d7b1ca246db027474f +size 10928 diff --git a/data/2025/2504_05xxx/2504.05979/images/072676f5a96fdb59a2c5611f9262a740e1f39908971d09d9835789f46d395625.jpg b/data/2025/2504_05xxx/2504.05979/images/072676f5a96fdb59a2c5611f9262a740e1f39908971d09d9835789f46d395625.jpg new file mode 100644 index 0000000000000000000000000000000000000000..edea02c7c49eb086664d3f4d3bcea28844bdf1e0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/072676f5a96fdb59a2c5611f9262a740e1f39908971d09d9835789f46d395625.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f85baf36272570bd6658eaaeff19082f93b02910697c4c5f343f121271a0cc1 +size 17479 diff --git a/data/2025/2504_05xxx/2504.05979/images/072dabf79d619daa8eeb4cd2f8867859c2a5525ca4ceff1ee839637ee46f829f.jpg b/data/2025/2504_05xxx/2504.05979/images/072dabf79d619daa8eeb4cd2f8867859c2a5525ca4ceff1ee839637ee46f829f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b616f72976e571c1eaa8f4e503d8d28c9df5d724 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/072dabf79d619daa8eeb4cd2f8867859c2a5525ca4ceff1ee839637ee46f829f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fba54f20e2a7fd4fe226d72a75525232b9aef6657814eeeeb6d78d621ddc3f0 +size 944 diff --git a/data/2025/2504_05xxx/2504.05979/images/07315caab9c8a97608642744aa18c2e35c6920d402732aa3a2675180b0a7af1e.jpg 
b/data/2025/2504_05xxx/2504.05979/images/07315caab9c8a97608642744aa18c2e35c6920d402732aa3a2675180b0a7af1e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..887a59bfccd906f65331768f3019b40744ef33ef
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/07315caab9c8a97608642744aa18c2e35c6920d402732aa3a2675180b0a7af1e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc1d4062603a94d08bbfdb060caf327d24ce4ceb269a1ecb369250e898d3f95
+size 13778
diff --git a/data/2025/2504_05xxx/2504.05979/images/073d5da23f5b7f75f55840c7e941a2744edbcb0a242ace828bcaf195e2c9978e.jpg b/data/2025/2504_05xxx/2504.05979/images/073d5da23f5b7f75f55840c7e941a2744edbcb0a242ace828bcaf195e2c9978e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d0da023535db970d49c7a2f8933079ac83b882f8
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/073d5da23f5b7f75f55840c7e941a2744edbcb0a242ace828bcaf195e2c9978e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:462dc1237d4071a8061ebe5cee72f6ef429d764291c52a056c72680ee648ba17
+size 3040
diff --git a/data/2025/2504_05xxx/2504.05979/images/075019b013e4717772a6d76e14fbaf9862c46b4f8223096c86fa73cb18a7fffb.jpg b/data/2025/2504_05xxx/2504.05979/images/075019b013e4717772a6d76e14fbaf9862c46b4f8223096c86fa73cb18a7fffb.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..27e5cca0477e591dfddc23d5a03c20dfdca9c18c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/075019b013e4717772a6d76e14fbaf9862c46b4f8223096c86fa73cb18a7fffb.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88640398009ca365d6a5c100ede7987699e6e30ee5d485d93183582e1ca4e762
+size 12413
diff --git a/data/2025/2504_05xxx/2504.05979/images/0755edc88c21d2311018ccff73e7808905f504f9fd16b4b534f5cee1942b18b2.jpg b/data/2025/2504_05xxx/2504.05979/images/0755edc88c21d2311018ccff73e7808905f504f9fd16b4b534f5cee1942b18b2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a28a395d4b61ab36a0d1ada4984abcc779a01653
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0755edc88c21d2311018ccff73e7808905f504f9fd16b4b534f5cee1942b18b2.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3b9d18d6fe0e96ead3eeb20d726f64b00564a5cc00fd7e519d841ad8fa70c7a
+size 8262
diff --git a/data/2025/2504_05xxx/2504.05979/images/0759fb223e415e0f19710f612a3c80b614bae80774a18396353bf7b14eb7da5d.jpg b/data/2025/2504_05xxx/2504.05979/images/0759fb223e415e0f19710f612a3c80b614bae80774a18396353bf7b14eb7da5d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..543bd66ecd66e72c25c0f0156d7148de4291d2c6
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0759fb223e415e0f19710f612a3c80b614bae80774a18396353bf7b14eb7da5d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e134fbd06533996c4d4074e6f7d9f895d9f2cc7872828950132204bb260ff5a
+size 14818
diff --git a/data/2025/2504_05xxx/2504.05979/images/0771e85cb3485940ea65217af54563edb5368d1ed7f290c2f9c5ad8bdccee8ee.jpg b/data/2025/2504_05xxx/2504.05979/images/0771e85cb3485940ea65217af54563edb5368d1ed7f290c2f9c5ad8bdccee8ee.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a219efd4f2b250c6a2a093eeb52bf03864ab5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0771e85cb3485940ea65217af54563edb5368d1ed7f290c2f9c5ad8bdccee8ee.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ad8fffc04513faab2d4e55ee9ec1f622486d7f193687fad985b59e3cc0eb025
+size 10173
diff --git a/data/2025/2504_05xxx/2504.05979/images/077315bfabdff852ee00ffcd33f6dc6141773a534acf687e5c1e7ad5284a7a19.jpg b/data/2025/2504_05xxx/2504.05979/images/077315bfabdff852ee00ffcd33f6dc6141773a534acf687e5c1e7ad5284a7a19.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..223db142faae0873538bc81f99f12caa36d3bb0c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/077315bfabdff852ee00ffcd33f6dc6141773a534acf687e5c1e7ad5284a7a19.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3c8ad81866482f7d5a4103bfb329aa1255e2c72c2d286b309fe7d700e5b2704
+size 5573
diff --git a/data/2025/2504_05xxx/2504.05979/images/07fe029890bbeded3efc202fc24a6394fb11dda52778c7ad9cd6a134088f6ae9.jpg b/data/2025/2504_05xxx/2504.05979/images/07fe029890bbeded3efc202fc24a6394fb11dda52778c7ad9cd6a134088f6ae9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3b3d4bfcde503522411f5ac46629854988edd977
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/07fe029890bbeded3efc202fc24a6394fb11dda52778c7ad9cd6a134088f6ae9.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a11ff972c30acac5e80a9942a38ff71b3d8523469bca50e02b1a8218a58237c
+size 12753
diff --git a/data/2025/2504_05xxx/2504.05979/images/084f24a984613e1c05f7c5e2fa9817167a1fbac88e5cd473c3386ae1bd4b9322.jpg b/data/2025/2504_05xxx/2504.05979/images/084f24a984613e1c05f7c5e2fa9817167a1fbac88e5cd473c3386ae1bd4b9322.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9431aebda60af94d38c8c2a4f56d87e3f976426f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/084f24a984613e1c05f7c5e2fa9817167a1fbac88e5cd473c3386ae1bd4b9322.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6becacc9e424907aa9fb94272f15996c77a00be3949ba2aec751a9414aec23d1
+size 5818
diff --git a/data/2025/2504_05xxx/2504.05979/images/08504d0ac60ae0b5850f6667bc744a4e3d743395a161f0de6a775051bb6f7278.jpg b/data/2025/2504_05xxx/2504.05979/images/08504d0ac60ae0b5850f6667bc744a4e3d743395a161f0de6a775051bb6f7278.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9866f59ef4c05b0df18d520bb6f946384f385ac4
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/08504d0ac60ae0b5850f6667bc744a4e3d743395a161f0de6a775051bb6f7278.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c5dce9f553198fe4d2270d64323c0014c4fb9c4af407bec3826796a61757a48
+size 7114
diff --git a/data/2025/2504_05xxx/2504.05979/images/087970cb94a73330a9143d8515c6bdd8a8061b49291d5ee3caa0aa8911c73064.jpg b/data/2025/2504_05xxx/2504.05979/images/087970cb94a73330a9143d8515c6bdd8a8061b49291d5ee3caa0aa8911c73064.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..533f9d367e2059d765229b50d2c5ca3bfb9bd987
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/087970cb94a73330a9143d8515c6bdd8a8061b49291d5ee3caa0aa8911c73064.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea232b5f46db01e5f42312a134b31f53f9e2923d9d88c319a512664688c4c780
+size 11786
diff --git a/data/2025/2504_05xxx/2504.05979/images/08d9d7d0f4498fa74ab30be14fe461e1fd36819539ce56b0a1505126518de149.jpg b/data/2025/2504_05xxx/2504.05979/images/08d9d7d0f4498fa74ab30be14fe461e1fd36819539ce56b0a1505126518de149.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fc657d7fe36cf22651ed45d6b2717baa9faafd57
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/08d9d7d0f4498fa74ab30be14fe461e1fd36819539ce56b0a1505126518de149.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf093e25ee76483ebfd25dec0fedc3de78ae8e468deca5d12a683c9664e6bc83
+size 15955
diff --git a/data/2025/2504_05xxx/2504.05979/images/091b9e25759363e18a4a8027407ebff16dae2c1d7196bfd2671970b2dfdf18dc.jpg b/data/2025/2504_05xxx/2504.05979/images/091b9e25759363e18a4a8027407ebff16dae2c1d7196bfd2671970b2dfdf18dc.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..28fe499626dbdf45b7b6bd303345b907838e8ede
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/091b9e25759363e18a4a8027407ebff16dae2c1d7196bfd2671970b2dfdf18dc.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0d3e458ea92baf018c5dde514a4a66ec4283f6b87a34e690b65b29ae6374e45
+size 15372
diff --git a/data/2025/2504_05xxx/2504.05979/images/0964f3c1bd27ee97370c3070e8b80b145333e5ecc74f44dff7f9bad2234476a9.jpg b/data/2025/2504_05xxx/2504.05979/images/0964f3c1bd27ee97370c3070e8b80b145333e5ecc74f44dff7f9bad2234476a9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..159afafd7c0cb63d86a838d01dc7d668d3f9ec85
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0964f3c1bd27ee97370c3070e8b80b145333e5ecc74f44dff7f9bad2234476a9.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0040d949424526208559fe3743234b0efd8ce126ccd311e5ea2fe6159ec2b753
+size 3456
diff --git a/data/2025/2504_05xxx/2504.05979/images/099e4ec78e062250f7bdab074cc4518ca8ed428e187a0bb348ddc4b8e86c87a1.jpg b/data/2025/2504_05xxx/2504.05979/images/099e4ec78e062250f7bdab074cc4518ca8ed428e187a0bb348ddc4b8e86c87a1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6aa1231e45d2e7b5286ddee3239e70bba0abf58f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/099e4ec78e062250f7bdab074cc4518ca8ed428e187a0bb348ddc4b8e86c87a1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40bff1de966b1d0421b362e66fe5cf768bf49a45a02a7adf49da534f50d00903
+size 4301
diff --git a/data/2025/2504_05xxx/2504.05979/images/09b169752ee560ce282e0e189791bd22c980b5f6d260fd06d327d0bb93aa3fdc.jpg b/data/2025/2504_05xxx/2504.05979/images/09b169752ee560ce282e0e189791bd22c980b5f6d260fd06d327d0bb93aa3fdc.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f448d725e463a5a225d42a4695c6770e3fc71748
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/09b169752ee560ce282e0e189791bd22c980b5f6d260fd06d327d0bb93aa3fdc.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba6e3c1298e8ee4f374eda2e7f181a851573bb5d384d348e58ab061bde18bf7d
+size 17315
diff --git a/data/2025/2504_05xxx/2504.05979/images/0a19bafce54aa9b28620506b9c6051449ff283c024b7b8bdde3dea6959a5f2ab.jpg b/data/2025/2504_05xxx/2504.05979/images/0a19bafce54aa9b28620506b9c6051449ff283c024b7b8bdde3dea6959a5f2ab.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..73b413a699721f9dbe9afa77b76d31127bb35781
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0a19bafce54aa9b28620506b9c6051449ff283c024b7b8bdde3dea6959a5f2ab.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7630a1901e781efb2bbc31d18a7aeca9a44e2b0387b9d7901844ab731ea7e62a
+size 13142
diff --git a/data/2025/2504_05xxx/2504.05979/images/0a2a55819ee33dc9ba77d52891f0222654f300378e0c315f91df125d5feab4ef.jpg b/data/2025/2504_05xxx/2504.05979/images/0a2a55819ee33dc9ba77d52891f0222654f300378e0c315f91df125d5feab4ef.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c359273e90167a270b517337092c2069ba2bb281
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0a2a55819ee33dc9ba77d52891f0222654f300378e0c315f91df125d5feab4ef.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fbb6addfa3e8a3a80093e58c8891ffca0885b5824a7a32c5da23b2c1341ebe5
+size 22419
diff --git a/data/2025/2504_05xxx/2504.05979/images/0a75ab15149c9afdb4641c46e30bd50461202b7c88fa785bef866a2691d27f84.jpg b/data/2025/2504_05xxx/2504.05979/images/0a75ab15149c9afdb4641c46e30bd50461202b7c88fa785bef866a2691d27f84.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9116abf4b79a58b565f148299d9ce7dd84d3ace6
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0a75ab15149c9afdb4641c46e30bd50461202b7c88fa785bef866a2691d27f84.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86647c40ae76fe1382ce6c9f2a31aa31005f7633540d5498195d8559aba85b5c
+size 14878
diff --git a/data/2025/2504_05xxx/2504.05979/images/0a89af2a4699039f927a01b35f74a721a10ddc2eb096385ea8edf582d6c03d50.jpg b/data/2025/2504_05xxx/2504.05979/images/0a89af2a4699039f927a01b35f74a721a10ddc2eb096385ea8edf582d6c03d50.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a7df6143ec2019253c71faf9ff7b54f7caa17642
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0a89af2a4699039f927a01b35f74a721a10ddc2eb096385ea8edf582d6c03d50.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3b9b02b54c01216298f48c0647bdc3f7445fe29760ab2890a70ec9ba10cb994
+size 13641
diff --git a/data/2025/2504_05xxx/2504.05979/images/0a94fc94c0714aa10ea27a9a7909f7e8481229af8d64048c14d36241ab29679f.jpg b/data/2025/2504_05xxx/2504.05979/images/0a94fc94c0714aa10ea27a9a7909f7e8481229af8d64048c14d36241ab29679f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a0b6d97128a12e58fd7eb220b3412a612f1e2e55
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0a94fc94c0714aa10ea27a9a7909f7e8481229af8d64048c14d36241ab29679f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56a6f643420260e4a410b435f633994c30abdf98723a41a0fa92f67145a2556e
+size 9949
diff --git a/data/2025/2504_05xxx/2504.05979/images/0a9ad99bf92f46485bb850e72e6f3103fbdbc9cd19afa549dae69229296ce436.jpg b/data/2025/2504_05xxx/2504.05979/images/0a9ad99bf92f46485bb850e72e6f3103fbdbc9cd19afa549dae69229296ce436.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ceb2522c17ec0f5e3cbadda85c328fb4c5f50c63
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0a9ad99bf92f46485bb850e72e6f3103fbdbc9cd19afa549dae69229296ce436.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dd34c4b6685da544d94a51c056e9eb0c9f03a061a3a7a56e864731b57777bcd
+size 10520
diff --git a/data/2025/2504_05xxx/2504.05979/images/0aa8c1c419160769a182de4039193ca181bf320312a187d89844da7d1bcd55dc.jpg b/data/2025/2504_05xxx/2504.05979/images/0aa8c1c419160769a182de4039193ca181bf320312a187d89844da7d1bcd55dc.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..35c9e7ee66bb38e2e39d931b75df87fdea3a8485
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0aa8c1c419160769a182de4039193ca181bf320312a187d89844da7d1bcd55dc.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2483411f7bf9d3587b9257b932eecf1387e29cd53462d62dc693fb81b4ca1bd
+size 11150
diff --git a/data/2025/2504_05xxx/2504.05979/images/0abc90b5e3bcbd0cb683654abfcc56966ce72e50c6e2cbbe3c61540b34a5b998.jpg b/data/2025/2504_05xxx/2504.05979/images/0abc90b5e3bcbd0cb683654abfcc56966ce72e50c6e2cbbe3c61540b34a5b998.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cb9bd7ceba170962bb8187f0b9d5fe6fc9474bba
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0abc90b5e3bcbd0cb683654abfcc56966ce72e50c6e2cbbe3c61540b34a5b998.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1177f514b87819465a6cbb74ffb7d06381fcd3e6af037abc5bb9ed6bfc6cc45
+size 3276
diff --git a/data/2025/2504_05xxx/2504.05979/images/0abff9671077ea8920b3cfaa655376afa170146e8731c668c911bd36367fe676.jpg b/data/2025/2504_05xxx/2504.05979/images/0abff9671077ea8920b3cfaa655376afa170146e8731c668c911bd36367fe676.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6399bb6830f0fb98e8836b2c008c378e5d61d94b
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0abff9671077ea8920b3cfaa655376afa170146e8731c668c911bd36367fe676.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f53959c5b6e91fb40e2eadfafa54311dde13cd9a9c3f6fb83c4082a12ce1a67
+size 5853
diff --git a/data/2025/2504_05xxx/2504.05979/images/0b0cfcb2c756d34ecf6d719996e6b93b05c237d3b9298e8a45b297f87db15d04.jpg b/data/2025/2504_05xxx/2504.05979/images/0b0cfcb2c756d34ecf6d719996e6b93b05c237d3b9298e8a45b297f87db15d04.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..078da13f8587e4e7faa4ddd266ea9ce718498823
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0b0cfcb2c756d34ecf6d719996e6b93b05c237d3b9298e8a45b297f87db15d04.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99c79788ded358def328f53e6f3ea6b609bf4af976c18607fcd8f04c8a3a938c
+size 6363
diff --git a/data/2025/2504_05xxx/2504.05979/images/0b161ea7ba45146ed5301977dbc4153cddf37d20e4042c25caf795a6761d7b64.jpg b/data/2025/2504_05xxx/2504.05979/images/0b161ea7ba45146ed5301977dbc4153cddf37d20e4042c25caf795a6761d7b64.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..97f34bed5ff56d898465232e6d92eed733fe3412
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0b161ea7ba45146ed5301977dbc4153cddf37d20e4042c25caf795a6761d7b64.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ed5100ddc5009c8dfa18e6d6354e15b9595e76a6829b2d09fcdd5e124e183f4
+size 7011
diff --git a/data/2025/2504_05xxx/2504.05979/images/0b872b88a7a28e69a1daae9c6423253e4db8e407800572b004fd616d814d9b24.jpg b/data/2025/2504_05xxx/2504.05979/images/0b872b88a7a28e69a1daae9c6423253e4db8e407800572b004fd616d814d9b24.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a2ac8a005dc789ad0c737d499b160f79a4e929a2
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0b872b88a7a28e69a1daae9c6423253e4db8e407800572b004fd616d814d9b24.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8d59c883dde99600d77e4774d7b9c891c05cb30e9358355441440bb0df9c8af
+size 13715
diff --git a/data/2025/2504_05xxx/2504.05979/images/0bad4b8a9e257c2529bf84609f3ee26b5086573d0af51df270d4d92513151ff4.jpg b/data/2025/2504_05xxx/2504.05979/images/0bad4b8a9e257c2529bf84609f3ee26b5086573d0af51df270d4d92513151ff4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..14b1fd5f9544b1f22d71c871c73fb6e1ecf7cdf8
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0bad4b8a9e257c2529bf84609f3ee26b5086573d0af51df270d4d92513151ff4.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8ed4fe47bc2a1e9f1aef47a84adffd129e13f34ad1f77240abee3c3a0865981
+size 12469
diff --git a/data/2025/2504_05xxx/2504.05979/images/0bcf87aef23dd6d3d1cfe80c898e5921b661e040732d6eeef59708f811da4ad7.jpg b/data/2025/2504_05xxx/2504.05979/images/0bcf87aef23dd6d3d1cfe80c898e5921b661e040732d6eeef59708f811da4ad7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e330343f5836bb952067dcbca1286060dbd28067
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0bcf87aef23dd6d3d1cfe80c898e5921b661e040732d6eeef59708f811da4ad7.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa72d6c633eec508f5e7a0afdba9f56d7e7bafe83cd3da6d896d1642048f6fc9
+size 9648
diff --git a/data/2025/2504_05xxx/2504.05979/images/0be63390ebc394d88f7518d11a9497eca8059c414bc31788dd9f9bad0ee7ee64.jpg b/data/2025/2504_05xxx/2504.05979/images/0be63390ebc394d88f7518d11a9497eca8059c414bc31788dd9f9bad0ee7ee64.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3202bac6d897196fdbfc375a9efd2494911b3d64
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0be63390ebc394d88f7518d11a9497eca8059c414bc31788dd9f9bad0ee7ee64.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab255b1cc7a8d81a3bb0a2049d48f16b27f2489aad4b7a5302f8a533583e869e
+size 8351
diff --git a/data/2025/2504_05xxx/2504.05979/images/0c0470a214490980a8b144c3e88a00976a14f32edb1762c91f3846a0c05ec5b2.jpg b/data/2025/2504_05xxx/2504.05979/images/0c0470a214490980a8b144c3e88a00976a14f32edb1762c91f3846a0c05ec5b2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..dcba0c55068349a1496963c820d157f7ab076a63
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0c0470a214490980a8b144c3e88a00976a14f32edb1762c91f3846a0c05ec5b2.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:103498672f72d1c92a064eb6c727284b4d9a9009a47d8449b1eac7beca4603bf
+size 23808
diff --git a/data/2025/2504_05xxx/2504.05979/images/0c35796bfccda9797611a7529116ead987ca3802f68e686dd8c44e01a6055d5c.jpg b/data/2025/2504_05xxx/2504.05979/images/0c35796bfccda9797611a7529116ead987ca3802f68e686dd8c44e01a6055d5c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6f64947ee94f3ac84bd0c4bf1e985a330d145a43
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0c35796bfccda9797611a7529116ead987ca3802f68e686dd8c44e01a6055d5c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:533f42b81afcc2458c90ddeb0a07f6d6d43443d5d1a0b5b740442fb7607f8d30
+size 949
diff --git a/data/2025/2504_05xxx/2504.05979/images/0ce5a44bd3cd7500b4b78db75c0f1ccc81baa684ffa77ebdb728c28b0fe9e785.jpg b/data/2025/2504_05xxx/2504.05979/images/0ce5a44bd3cd7500b4b78db75c0f1ccc81baa684ffa77ebdb728c28b0fe9e785.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..51a779ec9f0dce558fc27312ce8b296cc1eb1d7f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0ce5a44bd3cd7500b4b78db75c0f1ccc81baa684ffa77ebdb728c28b0fe9e785.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:116952ecb44fa46b18e7754d491fb96f6d4817e0e3630c78954e963b1f937b93
+size 14907
diff --git a/data/2025/2504_05xxx/2504.05979/images/0ceb9d3c24c10622601c5cd8f2bfe1590592e3f441bd27e97f56e47c7c523543.jpg b/data/2025/2504_05xxx/2504.05979/images/0ceb9d3c24c10622601c5cd8f2bfe1590592e3f441bd27e97f56e47c7c523543.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d9a18c6e447cd1193352a5c931db6c6fd121f9be
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0ceb9d3c24c10622601c5cd8f2bfe1590592e3f441bd27e97f56e47c7c523543.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b315393dd85cd9ddfa46f38edc0c6f00eae0827176e23b39a69ccf3f545e1679
+size 24845
diff --git a/data/2025/2504_05xxx/2504.05979/images/0cfb348c56fe1101617d57441e326b36eadf434565b6c1880650127baaf8224e.jpg b/data/2025/2504_05xxx/2504.05979/images/0cfb348c56fe1101617d57441e326b36eadf434565b6c1880650127baaf8224e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..20055353075824d452d6b0c371dd87cca0ad679e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0cfb348c56fe1101617d57441e326b36eadf434565b6c1880650127baaf8224e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:130960350c23872c750013d54f17baab698b744bf6970bf210879c146391b8f0
+size 14836
diff --git a/data/2025/2504_05xxx/2504.05979/images/0d51b383aa6e1f47aa6ac0e33d09504e867f5f452a0b912bdf9c993b4e2622b3.jpg b/data/2025/2504_05xxx/2504.05979/images/0d51b383aa6e1f47aa6ac0e33d09504e867f5f452a0b912bdf9c993b4e2622b3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..34da05bd803690721a68d3b2628abf75a14f1b42
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0d51b383aa6e1f47aa6ac0e33d09504e867f5f452a0b912bdf9c993b4e2622b3.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbdd7e5e028737a66e726488beefdca789a88370548235cd71b66466ff59651f
+size 5672
diff --git a/data/2025/2504_05xxx/2504.05979/images/0d5ac0ba593b14c0ff43464e9d8cb2a78e3c44bedcbbb29c36a51144ad9c8c34.jpg b/data/2025/2504_05xxx/2504.05979/images/0d5ac0ba593b14c0ff43464e9d8cb2a78e3c44bedcbbb29c36a51144ad9c8c34.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8d11a3a5fbee6dad17e35f009dfe4f7f40c3ca90
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0d5ac0ba593b14c0ff43464e9d8cb2a78e3c44bedcbbb29c36a51144ad9c8c34.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6c7a43ed4a1007d97bdd0a0f9388a2c90c9acd526f756c10e5c57e81c61b4cc
+size 8830
diff --git a/data/2025/2504_05xxx/2504.05979/images/0d5d4b908f4ff3dbdff9941212fc68ab04e87f2ef75d6228897f56880dd27fc6.jpg b/data/2025/2504_05xxx/2504.05979/images/0d5d4b908f4ff3dbdff9941212fc68ab04e87f2ef75d6228897f56880dd27fc6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ff8d3f9ebd26bce3d751d96f31372108e1ce4c6a
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0d5d4b908f4ff3dbdff9941212fc68ab04e87f2ef75d6228897f56880dd27fc6.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:887a72f72dd260e2b75534a8e59eb72200b0c0405d96d6d7c1f07e1c49c0c352
+size 17342
diff --git a/data/2025/2504_05xxx/2504.05979/images/0d95f3d9522c913d7fb63c52805b36252551d9de67bab8edf492afd706660e7d.jpg b/data/2025/2504_05xxx/2504.05979/images/0d95f3d9522c913d7fb63c52805b36252551d9de67bab8edf492afd706660e7d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0cf3074480a254365c0c6b47f571bda39fbb6060
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0d95f3d9522c913d7fb63c52805b36252551d9de67bab8edf492afd706660e7d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab51f1ba85fd49ce1e6b7a6c7fd9a36cb0e0f2ec04cd024d129004bb7aa00c97
+size 13272
diff --git a/data/2025/2504_05xxx/2504.05979/images/0e3a0dc448dd3f289f1837b24241f0ae490d1d79799115f52e41475efc038437.jpg b/data/2025/2504_05xxx/2504.05979/images/0e3a0dc448dd3f289f1837b24241f0ae490d1d79799115f52e41475efc038437.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ea8016a682aea2153a305155deedea5a958246c2
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0e3a0dc448dd3f289f1837b24241f0ae490d1d79799115f52e41475efc038437.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df2353e3baf699137c52ee22fa2143311370d6cd30b1ceb12cb387af247dc04f
+size 17014
diff --git a/data/2025/2504_05xxx/2504.05979/images/0e6d7e6671a48677bfae227082d4c8fb8b48df29d71b55960f2c402f7f628630.jpg b/data/2025/2504_05xxx/2504.05979/images/0e6d7e6671a48677bfae227082d4c8fb8b48df29d71b55960f2c402f7f628630.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b84b5e478de0392d79908843147b8d6877a7502d
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0e6d7e6671a48677bfae227082d4c8fb8b48df29d71b55960f2c402f7f628630.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:771886d01001894956f90e08fcbb6feead3a6341027824509c31c425f7cf0832
+size 9086
diff --git a/data/2025/2504_05xxx/2504.05979/images/0ec6090994cbd2295c92ceaabd8108bda9fc1d44cff795793c63c2f03a6f1419.jpg b/data/2025/2504_05xxx/2504.05979/images/0ec6090994cbd2295c92ceaabd8108bda9fc1d44cff795793c63c2f03a6f1419.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..67c199373e91f39df9bedc54ab31ca6c4b0c0f25
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0ec6090994cbd2295c92ceaabd8108bda9fc1d44cff795793c63c2f03a6f1419.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db54c4cf687fd4aa6f93deb9c95cf8b91e5963fac5d22debe20256d305564452
+size 4942
diff --git a/data/2025/2504_05xxx/2504.05979/images/0f7ad2208e211f067756484bb7a6847038cf13549a2f34c8a13b12eeda49aad7.jpg b/data/2025/2504_05xxx/2504.05979/images/0f7ad2208e211f067756484bb7a6847038cf13549a2f34c8a13b12eeda49aad7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5e73091f2d647811dcd1e27fc3e306116e6998f1
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0f7ad2208e211f067756484bb7a6847038cf13549a2f34c8a13b12eeda49aad7.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74a082e1b7e5b1723481b09bc4b3acceaf603bef5f5d66fdfb64b2134b10fc21
+size 25537
diff --git a/data/2025/2504_05xxx/2504.05979/images/0f8a4c52b6db3fbfb6fe2b7d165d3273b2849687f1ef570d6810290c516025db.jpg b/data/2025/2504_05xxx/2504.05979/images/0f8a4c52b6db3fbfb6fe2b7d165d3273b2849687f1ef570d6810290c516025db.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b2492f5f8d68491448bce181843795c7dea48425
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0f8a4c52b6db3fbfb6fe2b7d165d3273b2849687f1ef570d6810290c516025db.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a4a34a49836443af4f9056f77e8129c80ffe5868a99efddab129eeab2a868c7
+size 8481
diff --git a/data/2025/2504_05xxx/2504.05979/images/0f95244911c8dc29653de6ffbe04b37fc4545e698fba41f0e9c612f8ef2eea1a.jpg b/data/2025/2504_05xxx/2504.05979/images/0f95244911c8dc29653de6ffbe04b37fc4545e698fba41f0e9c612f8ef2eea1a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d946c53cc1371d129e17f07aa02f1f0fc6482893
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0f95244911c8dc29653de6ffbe04b37fc4545e698fba41f0e9c612f8ef2eea1a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f342a58b9f6e578bbe6d49f08baeded660adc72b8d414b1d21d76b0774e2c01
+size 12373
diff --git a/data/2025/2504_05xxx/2504.05979/images/0fe2326eaaff906398f84779d9f2f8a9b656dcf843adbe8b635b0ebccc3b64c1.jpg b/data/2025/2504_05xxx/2504.05979/images/0fe2326eaaff906398f84779d9f2f8a9b656dcf843adbe8b635b0ebccc3b64c1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..eabcb8d821831546d3590b7bea93fce71fed64ea
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/0fe2326eaaff906398f84779d9f2f8a9b656dcf843adbe8b635b0ebccc3b64c1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfce26ed50ad8fc3f0c41eb0235000e96905ddb44bc4120127aa6480851d92ba
+size 11747
diff --git a/data/2025/2504_05xxx/2504.05979/images/107a161e8c64a21e6ae887d967ee6aa6bf1c940aafacc290e429d8deb9516754.jpg b/data/2025/2504_05xxx/2504.05979/images/107a161e8c64a21e6ae887d967ee6aa6bf1c940aafacc290e429d8deb9516754.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..69ceb7e3def3b380ee8c16d061d907155b8daa0d
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/107a161e8c64a21e6ae887d967ee6aa6bf1c940aafacc290e429d8deb9516754.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:909c894f0fc57a6b13a6010179b5f49277d5e46e6e5a55cc267189548ae181f7
+size 11913
diff --git a/data/2025/2504_05xxx/2504.05979/images/1086a817de2fda2fa73bc2221b337c167aa823711f8d87db081972a8c108577d.jpg b/data/2025/2504_05xxx/2504.05979/images/1086a817de2fda2fa73bc2221b337c167aa823711f8d87db081972a8c108577d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..635aa7f73d6d879abab714e6bee48c9113c70def
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1086a817de2fda2fa73bc2221b337c167aa823711f8d87db081972a8c108577d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47f11b2044360affb9c328e7e6efdaea157ebb0cd8a1eebbeb8846683a84ef5f
+size 13905
diff --git a/data/2025/2504_05xxx/2504.05979/images/10e10b4b43504d485d1b89173a358ad873c0bb08494a06dc780f0f725b125906.jpg b/data/2025/2504_05xxx/2504.05979/images/10e10b4b43504d485d1b89173a358ad873c0bb08494a06dc780f0f725b125906.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..438982a9cb8610b910b7cab4547ded99f3aa05f6
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/10e10b4b43504d485d1b89173a358ad873c0bb08494a06dc780f0f725b125906.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb896458ad29deabaf2dc589d85a82f62c3cfba9cfbf85c008ab017ceb353260
+size 19347
diff --git a/data/2025/2504_05xxx/2504.05979/images/10f93dc20ac60dddd7d9e915fac07726f076d66e2f81a27aa151917e0e62abcf.jpg b/data/2025/2504_05xxx/2504.05979/images/10f93dc20ac60dddd7d9e915fac07726f076d66e2f81a27aa151917e0e62abcf.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fade18f2fc3fec212fb7295e12309694c239c4ef
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/10f93dc20ac60dddd7d9e915fac07726f076d66e2f81a27aa151917e0e62abcf.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00362b116cdb44ce316ddb21d29cdf843d6349f9067fd59274d32d723ded75f3
+size 13911
diff --git a/data/2025/2504_05xxx/2504.05979/images/11029002e14154e37a105bc63a9acff74ed50d918df4d80c2a9f039305b6e741.jpg b/data/2025/2504_05xxx/2504.05979/images/11029002e14154e37a105bc63a9acff74ed50d918df4d80c2a9f039305b6e741.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9195a9a560a6aadb053dabd17d0bffe8fd4b4da7
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/11029002e14154e37a105bc63a9acff74ed50d918df4d80c2a9f039305b6e741.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56b40a11da2b8e3fdd8cfb82d83525964a013ebf688f728442064d89fbb32776
+size 8750
diff --git a/data/2025/2504_05xxx/2504.05979/images/114bf170640575674f20a5f89dd3e1c35830102990e7d45e9fe0e42bf870b7d0.jpg b/data/2025/2504_05xxx/2504.05979/images/114bf170640575674f20a5f89dd3e1c35830102990e7d45e9fe0e42bf870b7d0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1b5d10de8971f5274f9917745884410997c9bf94
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/114bf170640575674f20a5f89dd3e1c35830102990e7d45e9fe0e42bf870b7d0.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5cf85ea7b6d8d19d2e297184cfabde139ea77e73109a2aa92b7c64f7488c38c4
+size 16758
diff --git a/data/2025/2504_05xxx/2504.05979/images/116d28e49cd90395347ff38e9ec8e1c04b768757b89b7a9a1d0a0f0f317b20f4.jpg b/data/2025/2504_05xxx/2504.05979/images/116d28e49cd90395347ff38e9ec8e1c04b768757b89b7a9a1d0a0f0f317b20f4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..81d241986439dc272877cb0b94429e0812ea4785
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/116d28e49cd90395347ff38e9ec8e1c04b768757b89b7a9a1d0a0f0f317b20f4.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b84a9959d195f947722830cc8753cf8ed287c3e4ce83e01e8890d820d804e9d2
+size 17393
diff --git a/data/2025/2504_05xxx/2504.05979/images/1186cfc667444853a5ea105221e7465b842983362ddd848dab0f81318dea818f.jpg b/data/2025/2504_05xxx/2504.05979/images/1186cfc667444853a5ea105221e7465b842983362ddd848dab0f81318dea818f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3b18de887431a62020b35953ec69342911c062c1
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1186cfc667444853a5ea105221e7465b842983362ddd848dab0f81318dea818f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b44e05ce4424026d0db43d805a0f4924d81b27f7655b66e60c9d8144adb9e4eb
+size 8273
diff --git a/data/2025/2504_05xxx/2504.05979/images/11fb36737f5588598b2c288b162d49548d2524ca0fb7dcafb49bbd7f1ef163a6.jpg b/data/2025/2504_05xxx/2504.05979/images/11fb36737f5588598b2c288b162d49548d2524ca0fb7dcafb49bbd7f1ef163a6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..842e44268359b4146b5ccda7863196789d0ad699
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/11fb36737f5588598b2c288b162d49548d2524ca0fb7dcafb49bbd7f1ef163a6.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b2904132a4be02256e2b70849ee54630f5040ecd5ecb5732868ba4fbc3ec431
+size 4350
diff --git a/data/2025/2504_05xxx/2504.05979/images/123381a94924d4ad9fab326af061828f48aab18ef9bee3847f59fc56015e22fb.jpg b/data/2025/2504_05xxx/2504.05979/images/123381a94924d4ad9fab326af061828f48aab18ef9bee3847f59fc56015e22fb.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6886a5b1b7436531aac0a96dddf580467e6e0af8
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/123381a94924d4ad9fab326af061828f48aab18ef9bee3847f59fc56015e22fb.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05f817d68cbc3402bee713708058273d8d0c336dbf426694d1378bbd08c56da8
+size 8101
diff --git a/data/2025/2504_05xxx/2504.05979/images/12d972a127e37a6219e4bd892eadd138cfbb736fb6916e3a8ea8c8cccd698774.jpg b/data/2025/2504_05xxx/2504.05979/images/12d972a127e37a6219e4bd892eadd138cfbb736fb6916e3a8ea8c8cccd698774.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4724f39e7a54c797fbd4d34796f8af4aa28fb3e9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/12d972a127e37a6219e4bd892eadd138cfbb736fb6916e3a8ea8c8cccd698774.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ee0d4f6ceeae1293ff4525dc65f58d3d38372de52ec9e314c19e95c1bee4ec9
+size 6009
diff --git a/data/2025/2504_05xxx/2504.05979/images/12fe1a849b28c956a4b37361bf45581ff6000a0b09f3ab5787b4647bbd9a3831.jpg b/data/2025/2504_05xxx/2504.05979/images/12fe1a849b28c956a4b37361bf45581ff6000a0b09f3ab5787b4647bbd9a3831.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3ca0ae354a8be801e2f6619f7b2c498c57d18d26
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/12fe1a849b28c956a4b37361bf45581ff6000a0b09f3ab5787b4647bbd9a3831.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67d1532dcf950149a82c0188472edf9982b96ebb825fa94c7a348066108095e0
+size 10264
diff --git a/data/2025/2504_05xxx/2504.05979/images/136b68479499a83964864cd84b1bd318f9fd41257d6d18e9f8d976b00b3c4581.jpg b/data/2025/2504_05xxx/2504.05979/images/136b68479499a83964864cd84b1bd318f9fd41257d6d18e9f8d976b00b3c4581.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..95aabb5aad8c76da99d404ed5286f0a908cae8c6
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/136b68479499a83964864cd84b1bd318f9fd41257d6d18e9f8d976b00b3c4581.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07d4b44ff19a03c45b99aae309c2f0555ff9a45e1ebe78398ed7d9070e3e412f
+size 8664
diff --git a/data/2025/2504_05xxx/2504.05979/images/136b9b62bc4d4cdfd994c33d5283fe146823a2feb7d76000b30d2a22fbb628a3.jpg b/data/2025/2504_05xxx/2504.05979/images/136b9b62bc4d4cdfd994c33d5283fe146823a2feb7d76000b30d2a22fbb628a3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..47c9cb73051608021793c04c11217ff72b5b3ec6
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/136b9b62bc4d4cdfd994c33d5283fe146823a2feb7d76000b30d2a22fbb628a3.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f023af48e3c6d392c91d41a933de175c1b1ab26e977dc0d3c60a1311cbf26496
+size 10414
diff --git a/data/2025/2504_05xxx/2504.05979/images/1371bb3d27b320f551f1aebcf4ba83faae804c2369cc85b124cb8a6b9e8b9ec5.jpg b/data/2025/2504_05xxx/2504.05979/images/1371bb3d27b320f551f1aebcf4ba83faae804c2369cc85b124cb8a6b9e8b9ec5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7550907e3f17c0a181c418bd7f6a61dd25acb7e5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1371bb3d27b320f551f1aebcf4ba83faae804c2369cc85b124cb8a6b9e8b9ec5.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a15ce7aec7a58271c8da91ed8f893b74bece822f2cbe76f9ec6574e0defda595
+size 11590
diff --git a/data/2025/2504_05xxx/2504.05979/images/13971dbe9a9a59527de67d029db2291646e369394e8bee5fe2ff780074999bab.jpg b/data/2025/2504_05xxx/2504.05979/images/13971dbe9a9a59527de67d029db2291646e369394e8bee5fe2ff780074999bab.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4772bb4cf337c352cf549670657733e71e395877
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/13971dbe9a9a59527de67d029db2291646e369394e8bee5fe2ff780074999bab.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98cc27011e0652a1db3d8278e24ea3e80cb6476f89a4d719eb95a88910d0dcf3
+size 9502
diff --git a/data/2025/2504_05xxx/2504.05979/images/145f3c5298c9786c810dc961b48e4e79d1a401610fd467498400ef8e5de81c24.jpg b/data/2025/2504_05xxx/2504.05979/images/145f3c5298c9786c810dc961b48e4e79d1a401610fd467498400ef8e5de81c24.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3d3aa6d48a6f885cc077d9c1da551d3ea57ee224
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/145f3c5298c9786c810dc961b48e4e79d1a401610fd467498400ef8e5de81c24.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8661d2ce9d2c022fba926c1f915b304afebeeb343f5711a19d882e71b0800c32
+size 11097
diff --git a/data/2025/2504_05xxx/2504.05979/images/1529d55096562ea1287a6e80927c823cf3ef85f6ff2fc349534df8add404dfe4.jpg b/data/2025/2504_05xxx/2504.05979/images/1529d55096562ea1287a6e80927c823cf3ef85f6ff2fc349534df8add404dfe4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..eeb4396e5d05830ae5e44aa33e42d876d94cde83
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1529d55096562ea1287a6e80927c823cf3ef85f6ff2fc349534df8add404dfe4.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0850da758f5c13e6a0ad3f494f08d55aeaecc17078f6adaa950a883c6eea5319
+size 6854
diff --git a/data/2025/2504_05xxx/2504.05979/images/15fc7c5a66f48467e502347e843669f0689718ebbba95832bb0c704a6633d0b7.jpg b/data/2025/2504_05xxx/2504.05979/images/15fc7c5a66f48467e502347e843669f0689718ebbba95832bb0c704a6633d0b7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..67f31cec4154db57524b0892d3bb678e7d4fdfb8
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/15fc7c5a66f48467e502347e843669f0689718ebbba95832bb0c704a6633d0b7.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:414352a0b30a66386f0a7a2d596e9a55c0eb9ed2af306d8e6d318109f5ec5b60
+size 10863
diff --git a/data/2025/2504_05xxx/2504.05979/images/1660f44ab6725c952d5b0bc43505f77c0d7cb4b552be04d6430ee2b25ec63d61.jpg b/data/2025/2504_05xxx/2504.05979/images/1660f44ab6725c952d5b0bc43505f77c0d7cb4b552be04d6430ee2b25ec63d61.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1af55c1f45dedcbd162c4c80b9168832cc872d2b
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1660f44ab6725c952d5b0bc43505f77c0d7cb4b552be04d6430ee2b25ec63d61.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c94caacc6420f612ea59d123c9e7ba2e2da9fa176126113c19af66c8e2051972
+size 20049
diff --git a/data/2025/2504_05xxx/2504.05979/images/16dc34668ed85bc66c78fa13a9e59e97bd1551b7dcf1f770849b1c6ec45313ab.jpg b/data/2025/2504_05xxx/2504.05979/images/16dc34668ed85bc66c78fa13a9e59e97bd1551b7dcf1f770849b1c6ec45313ab.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2278824e136624d02408fb38333a41c09bc37999
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/16dc34668ed85bc66c78fa13a9e59e97bd1551b7dcf1f770849b1c6ec45313ab.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20a3d8ad66817a47388a91abaed00262b72b92c0b66b645f4e9f50ce3ae03ccb
+size 8014
diff --git a/data/2025/2504_05xxx/2504.05979/images/16e0aacca78d0ddc7841c44b3666005097467f6eb11e4ce97e8f4b2ead39c10b.jpg b/data/2025/2504_05xxx/2504.05979/images/16e0aacca78d0ddc7841c44b3666005097467f6eb11e4ce97e8f4b2ead39c10b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8940a52944114ef8ec826213e535d0c41f080bb0
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/16e0aacca78d0ddc7841c44b3666005097467f6eb11e4ce97e8f4b2ead39c10b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45fe204cd4d43c57c739c3e90094bb65bf7cabdd3f77ee658b2ea060795754b3
+size 10258
diff --git a/data/2025/2504_05xxx/2504.05979/images/17281215ee551aee19132f26e2f57e9634fc4c01686dbd233f99e38afdc31bac.jpg b/data/2025/2504_05xxx/2504.05979/images/17281215ee551aee19132f26e2f57e9634fc4c01686dbd233f99e38afdc31bac.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fe68a1db53075953cc9a2953dba294aab041e662
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/17281215ee551aee19132f26e2f57e9634fc4c01686dbd233f99e38afdc31bac.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed6c99d72d5dc01a73a0fe43f61c60aecbaeb46b889799ace55ccb9caf0f96a7
+size 7477
diff --git a/data/2025/2504_05xxx/2504.05979/images/174139ab6dcc12cd6a99f039cfe71445d47612cb61b456c34e139a4d44ce5b27.jpg b/data/2025/2504_05xxx/2504.05979/images/174139ab6dcc12cd6a99f039cfe71445d47612cb61b456c34e139a4d44ce5b27.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9a0a532f5d89cb0214c3980ed2a332ec385a2339
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/174139ab6dcc12cd6a99f039cfe71445d47612cb61b456c34e139a4d44ce5b27.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b54d0f5572c55053bb169bdccf7e6a83135b3dbc061e52a45f0c09f6968eaf4c
+size 14619
diff --git a/data/2025/2504_05xxx/2504.05979/images/1751faef203209061a6fae572da8f65fff10f9f74ef783748441d19f7f4a2be9.jpg b/data/2025/2504_05xxx/2504.05979/images/1751faef203209061a6fae572da8f65fff10f9f74ef783748441d19f7f4a2be9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..05a155c4bbe3472e8f70731e712fe5163983a6d9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1751faef203209061a6fae572da8f65fff10f9f74ef783748441d19f7f4a2be9.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:737ea45b9d3bf7884d50f989a6340c53890a5b45e152d9ee10be79d90018755b
+size 26244
diff --git a/data/2025/2504_05xxx/2504.05979/images/17ab50440ba1a2025044fd3cd0741266c996928780e88a97d4c4333b56dfbe5a.jpg b/data/2025/2504_05xxx/2504.05979/images/17ab50440ba1a2025044fd3cd0741266c996928780e88a97d4c4333b56dfbe5a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7ed51abf52fe4636d218a9134559995505312dcb
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/17ab50440ba1a2025044fd3cd0741266c996928780e88a97d4c4333b56dfbe5a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b34e68f7c98b1393088756fffb26b39079590e806bce5e85d2d983ee1a8d2ec4
+size 7995
diff --git a/data/2025/2504_05xxx/2504.05979/images/17df35e7ffc6198a7ee15be18d8062a7c6c3846eac54441a5167179ad1116281.jpg b/data/2025/2504_05xxx/2504.05979/images/17df35e7ffc6198a7ee15be18d8062a7c6c3846eac54441a5167179ad1116281.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..921d986daf7250be291b16a0bfcfcf40e743b973
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/17df35e7ffc6198a7ee15be18d8062a7c6c3846eac54441a5167179ad1116281.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd6df35e9ce93dd9fd5b7ef1f02d2d48d2cf6e486ca29ffda4412cc1baa40d42
+size 9027
diff --git a/data/2025/2504_05xxx/2504.05979/images/17e080b72e8be225cacc760b2d20b9fc982e4786f8527d6dc2ca0c3cbfbee8d2.jpg b/data/2025/2504_05xxx/2504.05979/images/17e080b72e8be225cacc760b2d20b9fc982e4786f8527d6dc2ca0c3cbfbee8d2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ccbc656bc12574f742a3790c08d04e9399bfad17
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/17e080b72e8be225cacc760b2d20b9fc982e4786f8527d6dc2ca0c3cbfbee8d2.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8826049afa255e7702da3c57b107de035917a392793fd54188b3b94ca40d74f5
+size 5015
diff --git a/data/2025/2504_05xxx/2504.05979/images/180b6d2fbe08d1106795eea1f9a5165620b7ff33122c84b0e0c53f5d47bd52a9.jpg b/data/2025/2504_05xxx/2504.05979/images/180b6d2fbe08d1106795eea1f9a5165620b7ff33122c84b0e0c53f5d47bd52a9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3a9b71af543d3c7c52f997dbd33a5a1ee9b0be93
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/180b6d2fbe08d1106795eea1f9a5165620b7ff33122c84b0e0c53f5d47bd52a9.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d77918898f15ae7a257b01064ee3fb0fcbd54711208baea58ab1f823aab8cae
+size 8780
diff --git a/data/2025/2504_05xxx/2504.05979/images/186181b2b63acba5d62a3720407b17286c90fad94c34b8ea5cef23a65cfbf1ec.jpg b/data/2025/2504_05xxx/2504.05979/images/186181b2b63acba5d62a3720407b17286c90fad94c34b8ea5cef23a65cfbf1ec.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ae50bcc1db74eaeafec0035816ccef1bfee520af
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/186181b2b63acba5d62a3720407b17286c90fad94c34b8ea5cef23a65cfbf1ec.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27c6c705ccc1334866c2f5a9309e50ed764db0cf478a367abc748b79c596856b
+size 13400
diff --git a/data/2025/2504_05xxx/2504.05979/images/18ac5988622ec524574bc3cacfb9b7b32ff24f0458ab883762dd159f18758912.jpg b/data/2025/2504_05xxx/2504.05979/images/18ac5988622ec524574bc3cacfb9b7b32ff24f0458ab883762dd159f18758912.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..befc347590ed11a3e9737a94c9db7cb40706fb9d
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/18ac5988622ec524574bc3cacfb9b7b32ff24f0458ab883762dd159f18758912.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba73109ebd9f44b1d2dd2b33dcd6ef8050bec5ccc7d3dc4cafcaf2018891aacc
+size 14704
diff --git a/data/2025/2504_05xxx/2504.05979/images/18cac760bb10b2890aca59d644e60712db22b56fe52a9734a701f01dc680756b.jpg b/data/2025/2504_05xxx/2504.05979/images/18cac760bb10b2890aca59d644e60712db22b56fe52a9734a701f01dc680756b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..536739b4402ccba165bde516b4699691db93d972
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/18cac760bb10b2890aca59d644e60712db22b56fe52a9734a701f01dc680756b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00b0a618e847f0741f03da268bcf50db20f851a4e828b0464186bfd849f77e7f
+size 9102
diff --git a/data/2025/2504_05xxx/2504.05979/images/192fef7db091bb823a304562be65f9026029dbec8cd7557e21fc725cf7a16820.jpg b/data/2025/2504_05xxx/2504.05979/images/192fef7db091bb823a304562be65f9026029dbec8cd7557e21fc725cf7a16820.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8f5406aa8da947646ea0d464be42dd5a9258e27e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/192fef7db091bb823a304562be65f9026029dbec8cd7557e21fc725cf7a16820.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae6b5145deebc8a2155ac809a633a9581d8274b54b54c6420e60f5ce6eac65c6
+size 11059
diff --git a/data/2025/2504_05xxx/2504.05979/images/19ee65abb68b04bcc6de8f3231bd1dfc53040c57e2774f2a64269f4ba9efb4df.jpg b/data/2025/2504_05xxx/2504.05979/images/19ee65abb68b04bcc6de8f3231bd1dfc53040c57e2774f2a64269f4ba9efb4df.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..661bcf75d2337f7fa35dbdabfcd3c3f239afecf3
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/19ee65abb68b04bcc6de8f3231bd1dfc53040c57e2774f2a64269f4ba9efb4df.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d874e4dd5720288226fc53afbf0c21f9c58fd064150e41765f6c6fe614d1608
+size 9031
diff --git a/data/2025/2504_05xxx/2504.05979/images/1a0a8291fc0eb54d480890af32530069802f1d52bc82f8da414ec15032e47f7b.jpg b/data/2025/2504_05xxx/2504.05979/images/1a0a8291fc0eb54d480890af32530069802f1d52bc82f8da414ec15032e47f7b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fa3c7a949d9c6627d8eec5fdf21736dc8c230990
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1a0a8291fc0eb54d480890af32530069802f1d52bc82f8da414ec15032e47f7b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41533f4f324ffa8153492f867780bcdffb5493da0642606e85c10b9dbbf77afd
+size 6800
diff --git a/data/2025/2504_05xxx/2504.05979/images/1a0ac4c5dee91e1501338bfcaaf66863752c28b3f6438eb24488b2e78087a942.jpg b/data/2025/2504_05xxx/2504.05979/images/1a0ac4c5dee91e1501338bfcaaf66863752c28b3f6438eb24488b2e78087a942.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c718f0d656a7e9a56b19cc94228e3ef00662c61c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1a0ac4c5dee91e1501338bfcaaf66863752c28b3f6438eb24488b2e78087a942.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a7575e92ad03bb3f7d91dd0c70df8c666f10f2367d472496ba7af7a635921e0
+size 14121
diff --git a/data/2025/2504_05xxx/2504.05979/images/1a402093b931cc216473bf07b0f3c0350d4d6dad3092d0e84036254347477cea.jpg b/data/2025/2504_05xxx/2504.05979/images/1a402093b931cc216473bf07b0f3c0350d4d6dad3092d0e84036254347477cea.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..330107979ad24251701be09eac3b3690025a6af2
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1a402093b931cc216473bf07b0f3c0350d4d6dad3092d0e84036254347477cea.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5326f89dc65490109cf1550156848186d4fbac975cf0e343c17a7dcd2c16d274
+size 3670
diff --git a/data/2025/2504_05xxx/2504.05979/images/1a4731938b542638df6ad94c956073db2b548c15e4b1b8114d5307a10c99ea73.jpg b/data/2025/2504_05xxx/2504.05979/images/1a4731938b542638df6ad94c956073db2b548c15e4b1b8114d5307a10c99ea73.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..91c5b8205c3fb47f7b91bde758f0b552d50f3ffc
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1a4731938b542638df6ad94c956073db2b548c15e4b1b8114d5307a10c99ea73.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7e6d4748ec5ffa95b7f4dd7baa1d0efaab7e31992e309fa75741fa96515f762
+size 925
diff --git a/data/2025/2504_05xxx/2504.05979/images/1a4e0134e2181e538cfa34d801a9a29589af46e4564af10bde2b0a48bdef3123.jpg b/data/2025/2504_05xxx/2504.05979/images/1a4e0134e2181e538cfa34d801a9a29589af46e4564af10bde2b0a48bdef3123.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b65af38095f7e76cc6c9c238941df227916d92ed
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1a4e0134e2181e538cfa34d801a9a29589af46e4564af10bde2b0a48bdef3123.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:221deedab5109e6061ffc66c94ded567a84c71dfdd613d6a663b7b9be6944a7e
+size 10060
diff --git a/data/2025/2504_05xxx/2504.05979/images/1a5b4b1774fdc23750f023c71c85c56ea578acf01ebde2ea68333a166999bf31.jpg b/data/2025/2504_05xxx/2504.05979/images/1a5b4b1774fdc23750f023c71c85c56ea578acf01ebde2ea68333a166999bf31.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5bcf379cf471d4d2ce4b0dcd33e6b20db6f05887
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1a5b4b1774fdc23750f023c71c85c56ea578acf01ebde2ea68333a166999bf31.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c72e9cd22b0042325d6b859d621e00c27675daedaedd89b3c6588bd6c4331dae
+size 3929
diff --git a/data/2025/2504_05xxx/2504.05979/images/1a9a2fe4848dccc2cbdc43a21645b7d9153e4a783d4634d91cd4b7240855ca79.jpg b/data/2025/2504_05xxx/2504.05979/images/1a9a2fe4848dccc2cbdc43a21645b7d9153e4a783d4634d91cd4b7240855ca79.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ea870ce1e9df73111879fce362d6a9767ebe4c18
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1a9a2fe4848dccc2cbdc43a21645b7d9153e4a783d4634d91cd4b7240855ca79.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74ba9909cb22204676ea378534ae25876b7e44f60e03ea3f279c6c95d439fd4e
+size 895
diff --git a/data/2025/2504_05xxx/2504.05979/images/1b277bbf258a90079c84ec982452f71ac0f350730f9aea170064b358fe0eb923.jpg b/data/2025/2504_05xxx/2504.05979/images/1b277bbf258a90079c84ec982452f71ac0f350730f9aea170064b358fe0eb923.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e3d6dc4031eb34cff662a52bf33d489cb24f577e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1b277bbf258a90079c84ec982452f71ac0f350730f9aea170064b358fe0eb923.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32345ebc50fe50224c8022028116073fd83ff0766129af66d03e65056e6f8ec6
+size 14041
diff --git a/data/2025/2504_05xxx/2504.05979/images/1b3602b90061d565f84e84ed00f2de84147c20f674c0bc022d3fa5b9bd5926a9.jpg b/data/2025/2504_05xxx/2504.05979/images/1b3602b90061d565f84e84ed00f2de84147c20f674c0bc022d3fa5b9bd5926a9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5e1cb61e6dccfc4c4d6d84dccac43f4a001730e6
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1b3602b90061d565f84e84ed00f2de84147c20f674c0bc022d3fa5b9bd5926a9.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03756ca0f13c24a5413d7e9473a5478590eface7454f94f29bd187bb04bf8292
+size 9809
diff --git a/data/2025/2504_05xxx/2504.05979/images/1b62b31f138cbdfe4ceec53b959cee36fc3c5f0c22524ab44623a5cd565ac9c8.jpg b/data/2025/2504_05xxx/2504.05979/images/1b62b31f138cbdfe4ceec53b959cee36fc3c5f0c22524ab44623a5cd565ac9c8.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f9dddb6b81e1face77f1f2c5aae1abb43e825a70
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1b62b31f138cbdfe4ceec53b959cee36fc3c5f0c22524ab44623a5cd565ac9c8.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:027af9f420ff24eb8cb645bf21018767a83f97dc3449411d2ff941f38fe62fdf
+size 17280
diff --git a/data/2025/2504_05xxx/2504.05979/images/1b8601f013b93c27baf25e41c09132b903155d83d0ba6b9bdcd83e2b33d24788.jpg b/data/2025/2504_05xxx/2504.05979/images/1b8601f013b93c27baf25e41c09132b903155d83d0ba6b9bdcd83e2b33d24788.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..391f7ccebc6efe0af3686ad5422d1f657327ad1c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1b8601f013b93c27baf25e41c09132b903155d83d0ba6b9bdcd83e2b33d24788.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50c63d8d1c4b44d23383ec11251840e5961b04aeb1a577203ad99ea3c4e7beaf
+size 12172
diff --git a/data/2025/2504_05xxx/2504.05979/images/1b8d6a02d0d6267d2cb4821bda728d32861a5521511305a19499be24186e0a29.jpg b/data/2025/2504_05xxx/2504.05979/images/1b8d6a02d0d6267d2cb4821bda728d32861a5521511305a19499be24186e0a29.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..236e515b8f80bde03e2eaff8c90bec558442a425
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1b8d6a02d0d6267d2cb4821bda728d32861a5521511305a19499be24186e0a29.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a0720b01231fabc2e6928e80dd720d657dda59b59ec9ca50837b31806664f77
+size 6929
diff --git a/data/2025/2504_05xxx/2504.05979/images/1bc679f32fbc6bb7c76fac345ab8d2e03f52eb51f303c9abdaaec3738e3b8348.jpg b/data/2025/2504_05xxx/2504.05979/images/1bc679f32fbc6bb7c76fac345ab8d2e03f52eb51f303c9abdaaec3738e3b8348.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1e14753a8158d8f98e21bad407c50f4896ba7355
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1bc679f32fbc6bb7c76fac345ab8d2e03f52eb51f303c9abdaaec3738e3b8348.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:617945a714bff0a42e38017dd4651c32b2f8e85a5da6db997af715ebb85668f9
+size 13168
diff --git a/data/2025/2504_05xxx/2504.05979/images/1bdda49b07b096341fdec95b3c342e834c5c3255bfacb631d015bf9dcbf57d2b.jpg b/data/2025/2504_05xxx/2504.05979/images/1bdda49b07b096341fdec95b3c342e834c5c3255bfacb631d015bf9dcbf57d2b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..99c6b97c6834999e320bd2073bc6bc9db56416be
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1bdda49b07b096341fdec95b3c342e834c5c3255bfacb631d015bf9dcbf57d2b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e879a7816326bf478b89264e563a8edddd71e240ef5279ea8c40b7bf18d0d81
+size 8074
diff --git a/data/2025/2504_05xxx/2504.05979/images/1beb76da0b2a004a33d76279d80831d9cbb3917b5daee0d2aa13d05feb493089.jpg b/data/2025/2504_05xxx/2504.05979/images/1beb76da0b2a004a33d76279d80831d9cbb3917b5daee0d2aa13d05feb493089.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..aa6aa60d82baf15d8d5ca3bf0f6feabbd2eaad5a
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1beb76da0b2a004a33d76279d80831d9cbb3917b5daee0d2aa13d05feb493089.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55313c9c84b63e2b86e70f113b7ca7bfd33333ccfba71ec073e95bb3787e8dc3
+size 10236
diff --git a/data/2025/2504_05xxx/2504.05979/images/1bef728ebb6bc7f9c952867864380f4412a3ff879334ec345c1602238ad09c54.jpg b/data/2025/2504_05xxx/2504.05979/images/1bef728ebb6bc7f9c952867864380f4412a3ff879334ec345c1602238ad09c54.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8dbf8da2efc43bb9856276bf89e84c5cc7ab7bd9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1bef728ebb6bc7f9c952867864380f4412a3ff879334ec345c1602238ad09c54.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f9aa1802e951b61b5fafe953d22fae251e0353f8e8d0e525c4c3b76d3194671
+size 16074
diff --git a/data/2025/2504_05xxx/2504.05979/images/1c5838ea94c22768e1999617b5ceda45f55d61e50d2423844f55c0de5fe7e177.jpg b/data/2025/2504_05xxx/2504.05979/images/1c5838ea94c22768e1999617b5ceda45f55d61e50d2423844f55c0de5fe7e177.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3f9c549a6714b613a87523786d446ce84f782441
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1c5838ea94c22768e1999617b5ceda45f55d61e50d2423844f55c0de5fe7e177.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3923b3b63a0736a94e873b4548e8a76c88d606c40806a1cf15f061275867d89
+size 5713
diff --git a/data/2025/2504_05xxx/2504.05979/images/1c6fcddb57e3da5cf397a2753308ce3877a635582a5355e1b66875d5fb6b8636.jpg b/data/2025/2504_05xxx/2504.05979/images/1c6fcddb57e3da5cf397a2753308ce3877a635582a5355e1b66875d5fb6b8636.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..153e0153cd39b44fdaba60c8786c2c6f9bd1c2db
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1c6fcddb57e3da5cf397a2753308ce3877a635582a5355e1b66875d5fb6b8636.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55f8e3e33860f717bd4472f8928ae4d8d840dd754575cca37328e96520b8bfb9
+size 3685
diff --git a/data/2025/2504_05xxx/2504.05979/images/1c79b6d712c77204bf93747fafc74f99e0ed9cd9216c159ddbe9f67834fa0ee5.jpg b/data/2025/2504_05xxx/2504.05979/images/1c79b6d712c77204bf93747fafc74f99e0ed9cd9216c159ddbe9f67834fa0ee5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a271d1cf9aacd848750173565c879325e26d0110
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1c79b6d712c77204bf93747fafc74f99e0ed9cd9216c159ddbe9f67834fa0ee5.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e468efd1c0b82886ea3387d70d1ced324b3fc5d804e64e82a29aa31d5c8fa95d
+size 13409
diff --git a/data/2025/2504_05xxx/2504.05979/images/1c82b96334f0b7ff5da3c6643db4451a39f618d24f04fa9a677945a841872526.jpg b/data/2025/2504_05xxx/2504.05979/images/1c82b96334f0b7ff5da3c6643db4451a39f618d24f04fa9a677945a841872526.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..98fa6ce65ba079ec3c26c97540917f880ce23db9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1c82b96334f0b7ff5da3c6643db4451a39f618d24f04fa9a677945a841872526.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34cd7b56bcaab80181114edbec2c225fd122f3d99a744c1b566dd2178a496df4
+size 11155
diff --git a/data/2025/2504_05xxx/2504.05979/images/1ccf7d1613f12e0b81fba30f86e01357733d489b633304f1597188ed4f258e67.jpg b/data/2025/2504_05xxx/2504.05979/images/1ccf7d1613f12e0b81fba30f86e01357733d489b633304f1597188ed4f258e67.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b35480e3399ae6275ab350d629af4241761828f9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1ccf7d1613f12e0b81fba30f86e01357733d489b633304f1597188ed4f258e67.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cde749d3ede3a533a2720b1d87dd307b1c99e9ee1d133428c2c60683945c486a
+size 16334
diff --git a/data/2025/2504_05xxx/2504.05979/images/1cf0ebdbd8d076d3769a4ec23e0b1f57389127e1424e5db6a56d16c052155351.jpg b/data/2025/2504_05xxx/2504.05979/images/1cf0ebdbd8d076d3769a4ec23e0b1f57389127e1424e5db6a56d16c052155351.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..caff0768d0064b810bab38679409f0b9fa54da7e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1cf0ebdbd8d076d3769a4ec23e0b1f57389127e1424e5db6a56d16c052155351.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9326d13940838f5bd659a1ff80d4622788c277db4108acb5ab3c2150e37a0432
+size 6853
diff --git a/data/2025/2504_05xxx/2504.05979/images/1d5ece3f0e15c3834d92639316498ad868ae535068088c79e13b0ab393f5994b.jpg b/data/2025/2504_05xxx/2504.05979/images/1d5ece3f0e15c3834d92639316498ad868ae535068088c79e13b0ab393f5994b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..17d51fd1a9feb276fc7405a654f2ccf47e0d007f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1d5ece3f0e15c3834d92639316498ad868ae535068088c79e13b0ab393f5994b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e124f526265336fed3ddd2bd298821ca0a970f2c6c8ce23605f23ede7dc59c9f
+size 7164
diff --git a/data/2025/2504_05xxx/2504.05979/images/1d6b2ece4089619c8dc3c3901314db44c609491a2174435fb009e552ab953e21.jpg b/data/2025/2504_05xxx/2504.05979/images/1d6b2ece4089619c8dc3c3901314db44c609491a2174435fb009e552ab953e21.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a21db9ecdd5fcde26679bca021e0670bec48595e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1d6b2ece4089619c8dc3c3901314db44c609491a2174435fb009e552ab953e21.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7d23ca202644a9c339a677f8357fc5607c9e0f38bf46255731a887b994ac3ef
+size 14772
diff --git a/data/2025/2504_05xxx/2504.05979/images/1d6da19c3cbec9b2d439612a2a8553113f28a4397139ddfe38d9848ae7430cc6.jpg b/data/2025/2504_05xxx/2504.05979/images/1d6da19c3cbec9b2d439612a2a8553113f28a4397139ddfe38d9848ae7430cc6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c3fe7f826913dcb8bf4a3b1e67ad3b80f22c8f49
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1d6da19c3cbec9b2d439612a2a8553113f28a4397139ddfe38d9848ae7430cc6.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67382690cb9592fb493f4bf093eaa6753b4e70b82bba6e9020f55aed5df5c844
+size 8244
diff --git a/data/2025/2504_05xxx/2504.05979/images/1d87a3ecf1aa5cd61e4772d303263ffdce0edea148e97f37a796904330cd58ee.jpg b/data/2025/2504_05xxx/2504.05979/images/1d87a3ecf1aa5cd61e4772d303263ffdce0edea148e97f37a796904330cd58ee.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..457e934757facf00223507dae8fe9aeb4675ea70
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1d87a3ecf1aa5cd61e4772d303263ffdce0edea148e97f37a796904330cd58ee.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffab9894e5a22b53d664c982d8878c1d62cb9e72dffb362cffa68e5cc3ab057b
+size 6708
diff --git a/data/2025/2504_05xxx/2504.05979/images/1dcf8e8781f2caee3cccfcec05a958f963d7001ad1378b322b20be96f757857e.jpg b/data/2025/2504_05xxx/2504.05979/images/1dcf8e8781f2caee3cccfcec05a958f963d7001ad1378b322b20be96f757857e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7894fae71de79c03fde59628569309e60794f44a
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1dcf8e8781f2caee3cccfcec05a958f963d7001ad1378b322b20be96f757857e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be10361de4d12dee6037bdbd31b4da6c9b476b741082b22b64d8e6eb2058e6cd
+size 10431
diff --git a/data/2025/2504_05xxx/2504.05979/images/1e13756c7b144754ea9efd5eae0b5683845af5444f13a90c830d2492dfbd9bbd.jpg b/data/2025/2504_05xxx/2504.05979/images/1e13756c7b144754ea9efd5eae0b5683845af5444f13a90c830d2492dfbd9bbd.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..317ddc37196f2c1815b8d6f5678fefa5642cab7c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1e13756c7b144754ea9efd5eae0b5683845af5444f13a90c830d2492dfbd9bbd.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca87b73815335397e487a24add88220929462216da1b1f7498fa2b472cd8cfb5
+size 9744
diff --git a/data/2025/2504_05xxx/2504.05979/images/1e393f9e4a534fd74efa40049b2b251a874ba84af7a4ec4b9bafb559308b436f.jpg b/data/2025/2504_05xxx/2504.05979/images/1e393f9e4a534fd74efa40049b2b251a874ba84af7a4ec4b9bafb559308b436f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..13dce00646811679f71161b0653fbc9175b593bc
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1e393f9e4a534fd74efa40049b2b251a874ba84af7a4ec4b9bafb559308b436f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a22c82cf60572d3e115ef183150e71f3f7e2ac05cb12d34b27249dbadf734f3b
+size 26420
diff --git a/data/2025/2504_05xxx/2504.05979/images/1e842cfe1f25dd038d061f3ab7dd7da184ce007a8b89e10fd9297ee4bd1d051c.jpg b/data/2025/2504_05xxx/2504.05979/images/1e842cfe1f25dd038d061f3ab7dd7da184ce007a8b89e10fd9297ee4bd1d051c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..805d1897b46b638021a42fb2ac01c4469eb4ec52
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1e842cfe1f25dd038d061f3ab7dd7da184ce007a8b89e10fd9297ee4bd1d051c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c2b60eba6153edc35a524cab7586785ed0c06d00859ea037c9f2e4958868f95
+size 13317
diff --git a/data/2025/2504_05xxx/2504.05979/images/1e96cf20eb72b581b66ee41d6ac4ce09a54c1964332e8249d136a88fe87afae1.jpg b/data/2025/2504_05xxx/2504.05979/images/1e96cf20eb72b581b66ee41d6ac4ce09a54c1964332e8249d136a88fe87afae1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..33679d0f254b8c14c262e51195c4730f565c253d
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1e96cf20eb72b581b66ee41d6ac4ce09a54c1964332e8249d136a88fe87afae1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98d6e78c27f9676fe7495bb1325361430bd860dcc8a2b04f4b70f443be5b7451
+size 12673
diff --git a/data/2025/2504_05xxx/2504.05979/images/1f17bc39103c2c1bcbdf239933f68441984fad34a4c78c96991f6e9a246af36a.jpg b/data/2025/2504_05xxx/2504.05979/images/1f17bc39103c2c1bcbdf239933f68441984fad34a4c78c96991f6e9a246af36a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..202e737f0ab22f1c4bb7a79df0565eb746d2af71
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1f17bc39103c2c1bcbdf239933f68441984fad34a4c78c96991f6e9a246af36a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e529aace13ffd7d46bd24ec054271d419b87c9d995119e3cea80661afc370fab
+size 10576
diff --git a/data/2025/2504_05xxx/2504.05979/images/1f674c14a05985ac60abfb2449ee82311b3fd74a5ce3e0baab98c7fbcc2f60c0.jpg b/data/2025/2504_05xxx/2504.05979/images/1f674c14a05985ac60abfb2449ee82311b3fd74a5ce3e0baab98c7fbcc2f60c0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6ca4aace1a794ff52dde7401694e37e92af8827b
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1f674c14a05985ac60abfb2449ee82311b3fd74a5ce3e0baab98c7fbcc2f60c0.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ba48d73a2eed1add54ac6852d376d93939b770830cd6be64c8b6d12f1e308dd
+size 8617
diff --git a/data/2025/2504_05xxx/2504.05979/images/1f67f57b31e0864c333e12afce49cff3e3a9e54902f8fabbbf522d8e1b2bcb07.jpg b/data/2025/2504_05xxx/2504.05979/images/1f67f57b31e0864c333e12afce49cff3e3a9e54902f8fabbbf522d8e1b2bcb07.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b413902ac28139a36407e107b0ee13267d943c1e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1f67f57b31e0864c333e12afce49cff3e3a9e54902f8fabbbf522d8e1b2bcb07.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1d0739fe1bca7b856df798640d6c519fe82ac404f807356561d4d90c1d5718a
+size 10350
diff --git a/data/2025/2504_05xxx/2504.05979/images/1f7f8dc7c95580fa7127ddf404e77ee13666d31a7cb6fd3eb7e66943921a35a7.jpg b/data/2025/2504_05xxx/2504.05979/images/1f7f8dc7c95580fa7127ddf404e77ee13666d31a7cb6fd3eb7e66943921a35a7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2e0388430685d50fe3fb3d277a4a40a0e49f3b45
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1f7f8dc7c95580fa7127ddf404e77ee13666d31a7cb6fd3eb7e66943921a35a7.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cb988dd0b19334650c3631fc5c1119a1c025cee5b37cb6ac013add4fecc1c4d
+size 11898
diff --git a/data/2025/2504_05xxx/2504.05979/images/1f9b0b011e88c337096f493dbdc16bbafefb87c7ac6d0817b51a02d65ae211a2.jpg b/data/2025/2504_05xxx/2504.05979/images/1f9b0b011e88c337096f493dbdc16bbafefb87c7ac6d0817b51a02d65ae211a2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..04e14a2d7c7be3893b544f45b24a2c6b8d60c1e5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1f9b0b011e88c337096f493dbdc16bbafefb87c7ac6d0817b51a02d65ae211a2.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5367b7b832064b9fe58c820cbeb767a7dff3a6b6d5abbc9546ef21986f15de89
+size 7571
diff --git a/data/2025/2504_05xxx/2504.05979/images/1fb2c646d1d34c3087486d5485c780d38c7e518b41b9210914f00d583ccfd2b1.jpg b/data/2025/2504_05xxx/2504.05979/images/1fb2c646d1d34c3087486d5485c780d38c7e518b41b9210914f00d583ccfd2b1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..51d63984a7a9bb259c15ad69f5660a85f2ff94c4
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1fb2c646d1d34c3087486d5485c780d38c7e518b41b9210914f00d583ccfd2b1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c4d01ee5c9d6f37c442c58fc9be6d78e40abca530c5925aa31b7b886b39fdc2
+size 9534
diff --git a/data/2025/2504_05xxx/2504.05979/images/1fc3e7193935e21d941ba2a69b2a1621e26533675595f4e66648793cc5c521ca.jpg b/data/2025/2504_05xxx/2504.05979/images/1fc3e7193935e21d941ba2a69b2a1621e26533675595f4e66648793cc5c521ca.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..de44267fea12240c372a57194eca23cabfdd5ad4
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/1fc3e7193935e21d941ba2a69b2a1621e26533675595f4e66648793cc5c521ca.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81ddd48737e54985825c47917a1153892c04c26dddf41fd2ce91116f6e547c05
+size 10599
diff --git a/data/2025/2504_05xxx/2504.05979/images/2043ba7207ad0d9c159f8b1abd4cb8a7575070b2f4ddccfd301508c5cbc7ce5a.jpg
b/data/2025/2504_05xxx/2504.05979/images/2043ba7207ad0d9c159f8b1abd4cb8a7575070b2f4ddccfd301508c5cbc7ce5a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..431264aa508a44d12bad5844edc147d67c240e4b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2043ba7207ad0d9c159f8b1abd4cb8a7575070b2f4ddccfd301508c5cbc7ce5a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dc516a3070d3ba20b3e609428837fcef2924af26bcd8fb7ce933552c0b424d8 +size 9297 diff --git a/data/2025/2504_05xxx/2504.05979/images/204fddddcbcd5e6532fa8831a9afe9f12c4bf879dee56a592fddb35d74766418.jpg b/data/2025/2504_05xxx/2504.05979/images/204fddddcbcd5e6532fa8831a9afe9f12c4bf879dee56a592fddb35d74766418.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6904a0ab24105ea64d93f0a60281b86e619dd5d5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/204fddddcbcd5e6532fa8831a9afe9f12c4bf879dee56a592fddb35d74766418.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20cccc975fae5605a1001a4a50f9a6e7a8d003d82d50d0f05cca1ffd43141f0f +size 31811 diff --git a/data/2025/2504_05xxx/2504.05979/images/208b4450c1e449eb5c1aec21ac1e69f0930c6ad52d64be0770fb28058b41a6de.jpg b/data/2025/2504_05xxx/2504.05979/images/208b4450c1e449eb5c1aec21ac1e69f0930c6ad52d64be0770fb28058b41a6de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f87fdd00d18912f9dcd7b5464b94e928500acd4f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/208b4450c1e449eb5c1aec21ac1e69f0930c6ad52d64be0770fb28058b41a6de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4e2076e12bfaa414588298feb35cd61cb4b6a4097246626aba35f530bfdc6b5 +size 19211 diff --git a/data/2025/2504_05xxx/2504.05979/images/20925173299f15c6d10bdcdcb9ec379e65b18d75af2a57d652be5a8ff0b8001d.jpg b/data/2025/2504_05xxx/2504.05979/images/20925173299f15c6d10bdcdcb9ec379e65b18d75af2a57d652be5a8ff0b8001d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37a25363bb259781645d8b54d99146034d569983 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/20925173299f15c6d10bdcdcb9ec379e65b18d75af2a57d652be5a8ff0b8001d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a974be33280fe2f260e3040f66b775010739c9ff0947cc348785f256d4a01ceb +size 10382 diff --git a/data/2025/2504_05xxx/2504.05979/images/217541d90806a38fd995d3e22236061f85eb7bc63300a2acde2bd0f1910bb8aa.jpg b/data/2025/2504_05xxx/2504.05979/images/217541d90806a38fd995d3e22236061f85eb7bc63300a2acde2bd0f1910bb8aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..00bf6168b84852a6ffa202a96d7f22ececb07070 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/217541d90806a38fd995d3e22236061f85eb7bc63300a2acde2bd0f1910bb8aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96e3e9159fedfd725fd91f29b3c4afbb7dbeb18b156f841050e45947555ae34f +size 4952 diff --git a/data/2025/2504_05xxx/2504.05979/images/21754ab30160b46ae72d81bb54a4d2c4f2af0b2cb67b22e08bf3c41755442754.jpg b/data/2025/2504_05xxx/2504.05979/images/21754ab30160b46ae72d81bb54a4d2c4f2af0b2cb67b22e08bf3c41755442754.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d041ecf07c2fb5df3429d2009eb49200344b1e37 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/21754ab30160b46ae72d81bb54a4d2c4f2af0b2cb67b22e08bf3c41755442754.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c9fe3c7b353a7c802eb7f5d565ffd0521c5c45d74b8917f2b48d1f416e1e4fff +size 6251 diff --git a/data/2025/2504_05xxx/2504.05979/images/234feaba9c9e1d0f65d67e6df566f551271d9b6bc16b92cc18990362a5b21b2f.jpg b/data/2025/2504_05xxx/2504.05979/images/234feaba9c9e1d0f65d67e6df566f551271d9b6bc16b92cc18990362a5b21b2f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94ddb675fdba380a96930958c51ae6a2de31e6bf --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/234feaba9c9e1d0f65d67e6df566f551271d9b6bc16b92cc18990362a5b21b2f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07d4ef16819a58d56a5091bbb1f147ef77591d79d25a3174686ebbf7f6f78478 +size 17390 diff --git a/data/2025/2504_05xxx/2504.05979/images/235e18e587aa841842c1dc8ffd53a330c1345d64b6c3875f51db1f215dc6fc92.jpg b/data/2025/2504_05xxx/2504.05979/images/235e18e587aa841842c1dc8ffd53a330c1345d64b6c3875f51db1f215dc6fc92.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad50b782aa410e12c81eb98fc94e42e9af0af757 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/235e18e587aa841842c1dc8ffd53a330c1345d64b6c3875f51db1f215dc6fc92.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01ed78605721baf4d8bc3531eded86bd8beb3c756a10fcda99e089cd2bbed342 +size 17793 diff --git a/data/2025/2504_05xxx/2504.05979/images/23c403764728be45b17ee5db54c9ec0e5a6b239c1402350ba72598d480c17fbc.jpg b/data/2025/2504_05xxx/2504.05979/images/23c403764728be45b17ee5db54c9ec0e5a6b239c1402350ba72598d480c17fbc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d53ba13a798d4a9002624a9a3b7454aba225c8a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/23c403764728be45b17ee5db54c9ec0e5a6b239c1402350ba72598d480c17fbc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2bc9306e2e82fac6bdfbfaf3d939c58e44e714f3f316f1c6e55f6b8861fc431 +size 13685 diff --git a/data/2025/2504_05xxx/2504.05979/images/24ab3a835dd0ae72fb511f6dddb64f64bce8bf595463eb6cff06f05aa41f50a1.jpg b/data/2025/2504_05xxx/2504.05979/images/24ab3a835dd0ae72fb511f6dddb64f64bce8bf595463eb6cff06f05aa41f50a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bcad460637ea0b8154e60c81cf60f6958233931f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/24ab3a835dd0ae72fb511f6dddb64f64bce8bf595463eb6cff06f05aa41f50a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9479d12a2438c412887c49d020500d8c81c6ad36302b9a31e2e42a15bdaea7b6 +size 6923 diff --git a/data/2025/2504_05xxx/2504.05979/images/24acb3bb969c56cf9068bc75a47163bb4b2fd5cfb83a6ae51eec791707338fa8.jpg b/data/2025/2504_05xxx/2504.05979/images/24acb3bb969c56cf9068bc75a47163bb4b2fd5cfb83a6ae51eec791707338fa8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4c17ddf559218edaa8e478cab5b81a469920dd7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/24acb3bb969c56cf9068bc75a47163bb4b2fd5cfb83a6ae51eec791707338fa8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f93f0296d5504c427a6bc0d939ef2aa7b263f25cde799cc141a812124cd730a +size 9193 diff --git a/data/2025/2504_05xxx/2504.05979/images/25046cf08634831335f437686635f7b1affeb5cf872dfd9bdeef73ad97a4f786.jpg b/data/2025/2504_05xxx/2504.05979/images/25046cf08634831335f437686635f7b1affeb5cf872dfd9bdeef73ad97a4f786.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8435b0f96f026740585cbd0a02a2a9d34902970c --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/25046cf08634831335f437686635f7b1affeb5cf872dfd9bdeef73ad97a4f786.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4185c081366dcf7ce121ff40120ac5374ff45b2cd565837926d2435312bb8236 +size 25723 diff --git a/data/2025/2504_05xxx/2504.05979/images/25c6f8568e7bcdc44e6309534f95b1a0b59bb18d7773a69a2d061ee65e297860.jpg b/data/2025/2504_05xxx/2504.05979/images/25c6f8568e7bcdc44e6309534f95b1a0b59bb18d7773a69a2d061ee65e297860.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef2dcba608c098eca5aac0c52bd97005da8d9cb9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/25c6f8568e7bcdc44e6309534f95b1a0b59bb18d7773a69a2d061ee65e297860.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58e485f7c7faef5e0f98e65dcd679ae4eb7568564f85d20248be431ac54abf74 +size 11317 diff --git a/data/2025/2504_05xxx/2504.05979/images/2632b121866020ffde6bdceee821e4d81430159476d96e0e01139dc9d082ba70.jpg b/data/2025/2504_05xxx/2504.05979/images/2632b121866020ffde6bdceee821e4d81430159476d96e0e01139dc9d082ba70.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ab5a560dcfef0e04329136722d8a6e6d0fb20c4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2632b121866020ffde6bdceee821e4d81430159476d96e0e01139dc9d082ba70.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59360672647f28270f5fb3c12fcad56b690d4533bbfb9081c63c4ed34aee89fe +size 14362 diff --git a/data/2025/2504_05xxx/2504.05979/images/26dcc1fa3b38be01934c8e24ea493d1a80141258eb76a34f67d30c186588e554.jpg b/data/2025/2504_05xxx/2504.05979/images/26dcc1fa3b38be01934c8e24ea493d1a80141258eb76a34f67d30c186588e554.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe378037f74b460d90f1932ce19f9d5aada0752c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/26dcc1fa3b38be01934c8e24ea493d1a80141258eb76a34f67d30c186588e554.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50a13571f6156a1931f99b14f87e9c9f406ec0ebb74f88d8cb9d2306b64997d8 +size 9944 diff --git a/data/2025/2504_05xxx/2504.05979/images/273a3c5b1d1bd94335184aafbed95bd03cde1025a92f44aa700ed255ea3c88e5.jpg b/data/2025/2504_05xxx/2504.05979/images/273a3c5b1d1bd94335184aafbed95bd03cde1025a92f44aa700ed255ea3c88e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4515c74e8a79f96daa29695fa7ebb0ae37af30b3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/273a3c5b1d1bd94335184aafbed95bd03cde1025a92f44aa700ed255ea3c88e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68faad3e3d7c2754b2383c31eaa00ed581c8e4a110beea38fec1b29bfa0fcb18 +size 15116 diff --git a/data/2025/2504_05xxx/2504.05979/images/275a5a4359f968af20b14db343e03cc70f52a75880ac223fb413656701f9803b.jpg b/data/2025/2504_05xxx/2504.05979/images/275a5a4359f968af20b14db343e03cc70f52a75880ac223fb413656701f9803b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc1ed08d7aea2eccc9a0b0ef218c703a2c9a8791 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/275a5a4359f968af20b14db343e03cc70f52a75880ac223fb413656701f9803b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c864a5e3b9fdb8166a8078f0d2fc0f260159cae8296dc5607f344818f9125b60 +size 17468 diff --git a/data/2025/2504_05xxx/2504.05979/images/2766a0e15a98adf29692d023a9e4c39fef5c3e5ec591c397ca4c3600d9eb3690.jpg 
b/data/2025/2504_05xxx/2504.05979/images/2766a0e15a98adf29692d023a9e4c39fef5c3e5ec591c397ca4c3600d9eb3690.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5322c2540cb7f50646c5ee1edddbcece2c4f8449 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2766a0e15a98adf29692d023a9e4c39fef5c3e5ec591c397ca4c3600d9eb3690.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f266b801d5fc9d869f8216013d9c9a383f191fe7dccaf148650c6dba1d4865a1 +size 10043 diff --git a/data/2025/2504_05xxx/2504.05979/images/27b3efd0800340d8acae187e104fb4c863298773ee0e13233df9fd4a8ff2e810.jpg b/data/2025/2504_05xxx/2504.05979/images/27b3efd0800340d8acae187e104fb4c863298773ee0e13233df9fd4a8ff2e810.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3eb0a560b5c8894e3d06508d96ef4dad6f9fd0e7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/27b3efd0800340d8acae187e104fb4c863298773ee0e13233df9fd4a8ff2e810.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0ee156a3175c6dc8e5fcc10f56eed8bab2e657e1af8a79f127dc733e93c2c32 +size 11816 diff --git a/data/2025/2504_05xxx/2504.05979/images/27c5eea52e6ca0c737ae2cf46a953e11f8a44064e52b132ad803df249ab60e5c.jpg b/data/2025/2504_05xxx/2504.05979/images/27c5eea52e6ca0c737ae2cf46a953e11f8a44064e52b132ad803df249ab60e5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99b4d60df59b8f28408484324bff20e03bbed737 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/27c5eea52e6ca0c737ae2cf46a953e11f8a44064e52b132ad803df249ab60e5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0179bf95bacb258e90b576db214216cde2b277b1f6497a57b19b8c0f10801fe7 +size 16539 diff --git a/data/2025/2504_05xxx/2504.05979/images/27fce9400300fb184ed43e1e4b4f8eb042ae16f63646b6eda324b41fff54aede.jpg b/data/2025/2504_05xxx/2504.05979/images/27fce9400300fb184ed43e1e4b4f8eb042ae16f63646b6eda324b41fff54aede.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05ec75a8b682dd5747b21cb014142003b3cd7cd8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/27fce9400300fb184ed43e1e4b4f8eb042ae16f63646b6eda324b41fff54aede.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90e89793b3efaeb870b711e8d21686c9199a59fbf3d5b9132dd1e81203941e46 +size 17942 diff --git a/data/2025/2504_05xxx/2504.05979/images/28038c53ecd04cd0c7f8a9e7042077716a5c924fd1bcd4bdb4dc296a5e381da1.jpg b/data/2025/2504_05xxx/2504.05979/images/28038c53ecd04cd0c7f8a9e7042077716a5c924fd1bcd4bdb4dc296a5e381da1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec036f96a2329cb914224a8c87c779580cd90be0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/28038c53ecd04cd0c7f8a9e7042077716a5c924fd1bcd4bdb4dc296a5e381da1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:744dcd45cc17fdd1f38cf6c5d28f2e1f604f54083018e1314784944c84b1ce17 +size 1020 diff --git a/data/2025/2504_05xxx/2504.05979/images/285c8caa7cc67cd2c1b2e70ff31cb3ba4484255739d14c8d701b34ac2e79ec36.jpg b/data/2025/2504_05xxx/2504.05979/images/285c8caa7cc67cd2c1b2e70ff31cb3ba4484255739d14c8d701b34ac2e79ec36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa6e8905eb37af5b1137e2c4ca2a45236ff8a7b2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/285c8caa7cc67cd2c1b2e70ff31cb3ba4484255739d14c8d701b34ac2e79ec36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:1dfb35c235e21d2160a9df88ee0402957e17e7cfcb08eaf7a607d018921d6bae +size 10729 diff --git a/data/2025/2504_05xxx/2504.05979/images/2864e1c72caa29f54357f917cad03463a7b5f8e666314b153768cc9c9bd444d6.jpg b/data/2025/2504_05xxx/2504.05979/images/2864e1c72caa29f54357f917cad03463a7b5f8e666314b153768cc9c9bd444d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6be94088db22fe788ec92c7cd35e719b6493f3c1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2864e1c72caa29f54357f917cad03463a7b5f8e666314b153768cc9c9bd444d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a869b50518e39b87ee61bcf991ce58c5ad7209ad86c4f62bc3856f6bb07d328 +size 8593 diff --git a/data/2025/2504_05xxx/2504.05979/images/2865da09f43ccfb7bad4c1a38f720f8a7f22179ea35f92a1539146184df21cd6.jpg b/data/2025/2504_05xxx/2504.05979/images/2865da09f43ccfb7bad4c1a38f720f8a7f22179ea35f92a1539146184df21cd6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f90693dfe6dbfa7ed3bc3b69221973de800ea3bb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2865da09f43ccfb7bad4c1a38f720f8a7f22179ea35f92a1539146184df21cd6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64d9545605b5aa8397bf353b52dfac6add161311c33b45a50efb6e8df8a14dff +size 20509 diff --git a/data/2025/2504_05xxx/2504.05979/images/2865dc07722a294ea85dfd9841947ab7e799e3d91a0f4a9092b0ff0c6fddd5ac.jpg b/data/2025/2504_05xxx/2504.05979/images/2865dc07722a294ea85dfd9841947ab7e799e3d91a0f4a9092b0ff0c6fddd5ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35b0ee4c97f0542ca2768f931ab678721af975b9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2865dc07722a294ea85dfd9841947ab7e799e3d91a0f4a9092b0ff0c6fddd5ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1ba0aad310fe8a16796c2d12c36031da7206c3af1039a7a63aa859d77e5035a +size 13338 diff --git a/data/2025/2504_05xxx/2504.05979/images/2884734e16a9fec622bef494287b9ffd3440adb4d2a9f14797db74ec8bd44225.jpg b/data/2025/2504_05xxx/2504.05979/images/2884734e16a9fec622bef494287b9ffd3440adb4d2a9f14797db74ec8bd44225.jpg new file mode 100644 index 0000000000000000000000000000000000000000..352a613e2fe62df5933b75094eaec954c641ec64 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2884734e16a9fec622bef494287b9ffd3440adb4d2a9f14797db74ec8bd44225.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:320e224704b0010261824a94b53720b9923b39e900119b79485cc5b553fe3421 +size 14566 diff --git a/data/2025/2504_05xxx/2504.05979/images/288c7e6c2ba7c4828a38dbb2d93551531e7662268ca80529efd05e46c3b9cdc5.jpg b/data/2025/2504_05xxx/2504.05979/images/288c7e6c2ba7c4828a38dbb2d93551531e7662268ca80529efd05e46c3b9cdc5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..66b6035ec5cd172e080c7b52f0f9ba5843804589 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/288c7e6c2ba7c4828a38dbb2d93551531e7662268ca80529efd05e46c3b9cdc5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bb97b84bf402bac4d7af99f08d23dd81e54fe038deee48cf9ab595f0fe11216 +size 8924 diff --git a/data/2025/2504_05xxx/2504.05979/images/288f52547eff2bdfad27eac3648ba1b7a88af6beceffdce0913c9495832f57fb.jpg b/data/2025/2504_05xxx/2504.05979/images/288f52547eff2bdfad27eac3648ba1b7a88af6beceffdce0913c9495832f57fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7bbe16fd9aee6c28b1debd0ce64699c7b50cc0d --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/288f52547eff2bdfad27eac3648ba1b7a88af6beceffdce0913c9495832f57fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b05dbff35c35325c80fcebec195fe28e4db9a7d0e0c21e951f1afa7bab4badd0 +size 9002 diff --git a/data/2025/2504_05xxx/2504.05979/images/2893eb40cbf1268dab0c08335defc17fff61e8a62c9f7f88cbb40f2797ff174b.jpg b/data/2025/2504_05xxx/2504.05979/images/2893eb40cbf1268dab0c08335defc17fff61e8a62c9f7f88cbb40f2797ff174b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ddcfb68c1107814a7a0b7f3377b6b10ae6e9b87d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2893eb40cbf1268dab0c08335defc17fff61e8a62c9f7f88cbb40f2797ff174b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfdb720be0809af5626ebdfdf362b2df38153ed7fe2c714c6d54106bfb8bb593 +size 11669 diff --git a/data/2025/2504_05xxx/2504.05979/images/28bf76b0adb60b3e53902f81e3d33090021fb2e1147bd03fb08fac858f7ac972.jpg b/data/2025/2504_05xxx/2504.05979/images/28bf76b0adb60b3e53902f81e3d33090021fb2e1147bd03fb08fac858f7ac972.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20c896b6d193db3465fc26ddb50f4fb60e85bdd4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/28bf76b0adb60b3e53902f81e3d33090021fb2e1147bd03fb08fac858f7ac972.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:376f2be779975e393a7ede507a1409cda111010906e00cda225fc70dc7ad7125 +size 17215 diff --git a/data/2025/2504_05xxx/2504.05979/images/28ce32f62ab276444f6b71d92bf45eedf3ce7200dfed382d7460f96631f73071.jpg b/data/2025/2504_05xxx/2504.05979/images/28ce32f62ab276444f6b71d92bf45eedf3ce7200dfed382d7460f96631f73071.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04164ba10db826119b87007e85769f9137c2638c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/28ce32f62ab276444f6b71d92bf45eedf3ce7200dfed382d7460f96631f73071.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99f7012d1687877221f42d5161ff5c0bd4e499e259f1618bf6d6ea68e7748024 +size 13081 diff --git a/data/2025/2504_05xxx/2504.05979/images/2929ca2301ebf3a6fed0fa5d79fbf4b686d60eeaac8dec6a4e0cf76ff49e665e.jpg b/data/2025/2504_05xxx/2504.05979/images/2929ca2301ebf3a6fed0fa5d79fbf4b686d60eeaac8dec6a4e0cf76ff49e665e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8fbddc8389f6b4317bdccc0f32369f161f5d494 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2929ca2301ebf3a6fed0fa5d79fbf4b686d60eeaac8dec6a4e0cf76ff49e665e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39cc75f4bec8045d3ac0c7d5eec7781d4b1ced69de03325c48be729175d0c7d0 +size 892 diff --git a/data/2025/2504_05xxx/2504.05979/images/293cc3e02dece56450be7a6542b0151c4dcffe5012347d53906f237826b4a55e.jpg b/data/2025/2504_05xxx/2504.05979/images/293cc3e02dece56450be7a6542b0151c4dcffe5012347d53906f237826b4a55e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c9a57d32816994ed832b81f8ea767581d27f7214 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/293cc3e02dece56450be7a6542b0151c4dcffe5012347d53906f237826b4a55e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d99f891b97223746bf5ca52909a9ae7f942552341397c30c25f08f13468a3f5 +size 6065 diff --git a/data/2025/2504_05xxx/2504.05979/images/2982b017c59027bd916adf4c71fec2ef4c7b20ea6fcb1b3e05ccead2d64e7e70.jpg 
b/data/2025/2504_05xxx/2504.05979/images/2982b017c59027bd916adf4c71fec2ef4c7b20ea6fcb1b3e05ccead2d64e7e70.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb1d773566acf73ae7229e8c8c50704d0fb431c2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2982b017c59027bd916adf4c71fec2ef4c7b20ea6fcb1b3e05ccead2d64e7e70.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09255b6d661a4fb49a9522adcdd6bd366fa2d9887b91648d8f2e07bf6b220022 +size 13963 diff --git a/data/2025/2504_05xxx/2504.05979/images/2997ae5bbb03c3903ad7d778e996f50f9bff8f0a1ab2e783b344083f30f3cce3.jpg b/data/2025/2504_05xxx/2504.05979/images/2997ae5bbb03c3903ad7d778e996f50f9bff8f0a1ab2e783b344083f30f3cce3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30a23f7ecdcbbd6e57de4607b6e3ceddd41e707d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2997ae5bbb03c3903ad7d778e996f50f9bff8f0a1ab2e783b344083f30f3cce3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20bada09ef684f8a3b20c2f27836c32e905bbd5b7cdd954c8c026ae12582efa4 +size 13323 diff --git a/data/2025/2504_05xxx/2504.05979/images/29d11d5b373c8c6684c115fc01b10b1a340f12dbedfc7dfb4b61051e6911c7fe.jpg b/data/2025/2504_05xxx/2504.05979/images/29d11d5b373c8c6684c115fc01b10b1a340f12dbedfc7dfb4b61051e6911c7fe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9abce27204aaef5a87a02d98fb268695b7923988 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/29d11d5b373c8c6684c115fc01b10b1a340f12dbedfc7dfb4b61051e6911c7fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f552de4d171b0fbcf6815a6d46f2cc0b1795dd1b205eee1da752ff39bac81977 +size 12256 diff --git a/data/2025/2504_05xxx/2504.05979/images/29e4e012635440b4b8fd5440387c6bcf7b8f25f3e0e7c0ea56e381f52b1a1451.jpg b/data/2025/2504_05xxx/2504.05979/images/29e4e012635440b4b8fd5440387c6bcf7b8f25f3e0e7c0ea56e381f52b1a1451.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0897030e55b81b07649eac08170fe1af5745723a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/29e4e012635440b4b8fd5440387c6bcf7b8f25f3e0e7c0ea56e381f52b1a1451.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3fdbc96ecf1c36dd7f0078a2fa9a368c4c9cf0383ec0ff2c768d7e3ef25448e +size 13676 diff --git a/data/2025/2504_05xxx/2504.05979/images/29eecd09cc58857aaac407a762e3d2472c1aae1724d08052e5c17f20df19b893.jpg b/data/2025/2504_05xxx/2504.05979/images/29eecd09cc58857aaac407a762e3d2472c1aae1724d08052e5c17f20df19b893.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d012df5972910e0b7c1e38ffcb5799c04f43b3a2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/29eecd09cc58857aaac407a762e3d2472c1aae1724d08052e5c17f20df19b893.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5347dae9b2915dc6bf4f1714ed06663570d43d9a4827f5b9af97c111fe00570e +size 14420 diff --git a/data/2025/2504_05xxx/2504.05979/images/29f5e73b544db2cde87a7ae619bda23d4e11cc25cfb4269634f02ce791227353.jpg b/data/2025/2504_05xxx/2504.05979/images/29f5e73b544db2cde87a7ae619bda23d4e11cc25cfb4269634f02ce791227353.jpg new file mode 100644 index 0000000000000000000000000000000000000000..632c8e5b28621b2e1766d288fab91b329cd4f254 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/29f5e73b544db2cde87a7ae619bda23d4e11cc25cfb4269634f02ce791227353.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:5eadc78910ec4e29be2ebfadb293dff67a6b4cb87ec78438aec4bc5d1b26d6f2 +size 7841 diff --git a/data/2025/2504_05xxx/2504.05979/images/2a04299128063c19b3d18cf2c84c9a40f476b3c8ef92b747667e18981dce0475.jpg b/data/2025/2504_05xxx/2504.05979/images/2a04299128063c19b3d18cf2c84c9a40f476b3c8ef92b747667e18981dce0475.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ad13b65b5213f362929afa543d81f8e3b299879 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2a04299128063c19b3d18cf2c84c9a40f476b3c8ef92b747667e18981dce0475.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5b5e01709fe2f79de2a5e7bf6abe8cd97371ffd005d6c3534e6bfceeff736cf +size 5647 diff --git a/data/2025/2504_05xxx/2504.05979/images/2a2ba40c70067c2bbfe87ba3140fb913b131f18ca0f6a0801c01555583398b16.jpg b/data/2025/2504_05xxx/2504.05979/images/2a2ba40c70067c2bbfe87ba3140fb913b131f18ca0f6a0801c01555583398b16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..66896a95411f8c4ad932cce26f87598a51276c65 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2a2ba40c70067c2bbfe87ba3140fb913b131f18ca0f6a0801c01555583398b16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa528ceac92978eaa3aab9974dbc0f4aa118e35d367363ce725efbf29c132a11 +size 11629 diff --git a/data/2025/2504_05xxx/2504.05979/images/2ac37d8f19a0434436d70e6adb1d281a20d590ab3f0d0d7a7fa32893d0767278.jpg b/data/2025/2504_05xxx/2504.05979/images/2ac37d8f19a0434436d70e6adb1d281a20d590ab3f0d0d7a7fa32893d0767278.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4bbb3bf2e65f60269d5b38477278fe8df3184900 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2ac37d8f19a0434436d70e6adb1d281a20d590ab3f0d0d7a7fa32893d0767278.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:025e2117abaa035d924dcc6baaaa07d8252af73db587d1e39677be7949eebd74 +size 15329 diff --git a/data/2025/2504_05xxx/2504.05979/images/2aea31980297b8bd9effa16d06efa9dd2a95f1e6a7b7a99dc48e7bb6e4ed6a27.jpg b/data/2025/2504_05xxx/2504.05979/images/2aea31980297b8bd9effa16d06efa9dd2a95f1e6a7b7a99dc48e7bb6e4ed6a27.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48e6252d152a21bb327bc60cc934389e0c50cffa --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2aea31980297b8bd9effa16d06efa9dd2a95f1e6a7b7a99dc48e7bb6e4ed6a27.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5664d44abcdbec2f3610eaf6d18f255c090cab509d2cfc34540303c858bb4aa9 +size 9861 diff --git a/data/2025/2504_05xxx/2504.05979/images/2b470b9a2e3376254260fb3776461599c8d766e1b076eaec1370e72b4b2e5b06.jpg b/data/2025/2504_05xxx/2504.05979/images/2b470b9a2e3376254260fb3776461599c8d766e1b076eaec1370e72b4b2e5b06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..801061a45831485205f60e5e640006ab160fb2da --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2b470b9a2e3376254260fb3776461599c8d766e1b076eaec1370e72b4b2e5b06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca4691a2604a9f8140a1dbb738201ce61be5601cecf82b7179f69853603f7a9b +size 8920 diff --git a/data/2025/2504_05xxx/2504.05979/images/2b5cbcdbe86ff5d8016d28bf0a81a16d30075a3c50e64b84bfd2e7a4f9dbfb54.jpg b/data/2025/2504_05xxx/2504.05979/images/2b5cbcdbe86ff5d8016d28bf0a81a16d30075a3c50e64b84bfd2e7a4f9dbfb54.jpg new file mode 100644 index 0000000000000000000000000000000000000000..afeb02057a62de4168bd6b6c90dc9a488db0684b --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/2b5cbcdbe86ff5d8016d28bf0a81a16d30075a3c50e64b84bfd2e7a4f9dbfb54.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6bcc7481c9d39b42f86d1f395a596a7edafc92886e9466cdce721d8e61c3a82 +size 10728 diff --git a/data/2025/2504_05xxx/2504.05979/images/2b91fde29b7d65bb3965853dad07b3b346f976065009ddf3ee693efe1429b9de.jpg b/data/2025/2504_05xxx/2504.05979/images/2b91fde29b7d65bb3965853dad07b3b346f976065009ddf3ee693efe1429b9de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20e6a7119dd3a271f4ed22764b63f2b3b19cc96b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2b91fde29b7d65bb3965853dad07b3b346f976065009ddf3ee693efe1429b9de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b7b3f2efbf24b9cc696d0c4bd3875073ef9b4c952aa39598298473da565ec43 +size 13557 diff --git a/data/2025/2504_05xxx/2504.05979/images/2bc479794ab27b52ddd075f3f33d1e15a260aae2e169b634378ccdde014ad491.jpg b/data/2025/2504_05xxx/2504.05979/images/2bc479794ab27b52ddd075f3f33d1e15a260aae2e169b634378ccdde014ad491.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0504ef574980eaa9ddbbbe2e354141f788fa49f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2bc479794ab27b52ddd075f3f33d1e15a260aae2e169b634378ccdde014ad491.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77e9db753609bc3655d89be24a82276b774c2f9f9e1c23349f723a35f9f86cac +size 13100 diff --git a/data/2025/2504_05xxx/2504.05979/images/2c0a4cd420ef9d5e4634c2962449526f8adf2f9dc3c5b4ca6622d4b56c4d08ad.jpg b/data/2025/2504_05xxx/2504.05979/images/2c0a4cd420ef9d5e4634c2962449526f8adf2f9dc3c5b4ca6622d4b56c4d08ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd7bbe9bd303709e94fe3757a08cd4c9eb8091c6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2c0a4cd420ef9d5e4634c2962449526f8adf2f9dc3c5b4ca6622d4b56c4d08ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0317c4c0834cda9288153f3a3e6dc1a91d65832e7ccc18a8b755c258bc1a77f1 +size 5751 diff --git a/data/2025/2504_05xxx/2504.05979/images/2c63f34552d2fef8c8e86f7dd76a62315f4c1dd4bcb26b2198e0e7acd23d6ef7.jpg b/data/2025/2504_05xxx/2504.05979/images/2c63f34552d2fef8c8e86f7dd76a62315f4c1dd4bcb26b2198e0e7acd23d6ef7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a761018a254565adef1265e7d3b64b0d57c7c415 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2c63f34552d2fef8c8e86f7dd76a62315f4c1dd4bcb26b2198e0e7acd23d6ef7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c0403c5bc43bdf45bafcea6a3afdeaf3a8efb9fc5d39b75e8296f1766b31b8d +size 7443 diff --git a/data/2025/2504_05xxx/2504.05979/images/2c704075d95d45fe4f0f7a7b678bfc45ee73af1834623f4fe6e02497499106c1.jpg b/data/2025/2504_05xxx/2504.05979/images/2c704075d95d45fe4f0f7a7b678bfc45ee73af1834623f4fe6e02497499106c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d9e5718bb6c10f159ed5356d58645d456dd96e6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2c704075d95d45fe4f0f7a7b678bfc45ee73af1834623f4fe6e02497499106c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17c160051ef06b1f92e83805329356a8c3d013eeac0ce8f7308d54169e9cc178 +size 16091 diff --git a/data/2025/2504_05xxx/2504.05979/images/2c9aaa16beceac6929cab3c491e5386e4870c1879821502bc939b57f21a74ac6.jpg 
b/data/2025/2504_05xxx/2504.05979/images/2c9aaa16beceac6929cab3c491e5386e4870c1879821502bc939b57f21a74ac6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..512b180a200f7cd48e86ff2baa0582ee22f2d4e0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2c9aaa16beceac6929cab3c491e5386e4870c1879821502bc939b57f21a74ac6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f03979858a7552583a010fcfba1d1392b450750d6b418d0dccd2ded5404cd11b +size 13014 diff --git a/data/2025/2504_05xxx/2504.05979/images/2cef513138676cfd0753561a3373c26db0d5be094be17da19006ce2882bdbd13.jpg b/data/2025/2504_05xxx/2504.05979/images/2cef513138676cfd0753561a3373c26db0d5be094be17da19006ce2882bdbd13.jpg new file mode 100644 index 0000000000000000000000000000000000000000..829842ca89d3cbbb1af9afb6f7b2a34a51c04f70 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2cef513138676cfd0753561a3373c26db0d5be094be17da19006ce2882bdbd13.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ee0a991e355495514bc17fddbe983a7000a766cb0bcc1ced6a37430bd4956c3 +size 4037 diff --git a/data/2025/2504_05xxx/2504.05979/images/2d141aa5b85e10194f8145486f1551abb222659e93a01d192127766df99e63e9.jpg b/data/2025/2504_05xxx/2504.05979/images/2d141aa5b85e10194f8145486f1551abb222659e93a01d192127766df99e63e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46b8ad90fff85d2cf3f307c5b2946a5120418b7a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2d141aa5b85e10194f8145486f1551abb222659e93a01d192127766df99e63e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62c5e2e7e26c33fa707262bebd85df28c53c4aa0452bc38c7a22ec85f287a3e4 +size 10706 diff --git a/data/2025/2504_05xxx/2504.05979/images/2d588085b2425d35e86ead8fd151a9f1b0a00f306e88ed5971910bddaaa8e8b1.jpg b/data/2025/2504_05xxx/2504.05979/images/2d588085b2425d35e86ead8fd151a9f1b0a00f306e88ed5971910bddaaa8e8b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5890f0874fe3ae8f02c666a7969939d0dc235601 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2d588085b2425d35e86ead8fd151a9f1b0a00f306e88ed5971910bddaaa8e8b1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d26be9ebed987e0f134884da1e1040f58697476237cb10670be7d32dcd8ac216 +size 993 diff --git a/data/2025/2504_05xxx/2504.05979/images/2de6dd439640e1a491cd2669646ca4deab67272994343185f41956a82e6a40a1.jpg b/data/2025/2504_05xxx/2504.05979/images/2de6dd439640e1a491cd2669646ca4deab67272994343185f41956a82e6a40a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..19d5c1df73e9e74ef4a60a26e56d4bf9d41baf8e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2de6dd439640e1a491cd2669646ca4deab67272994343185f41956a82e6a40a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b9631f0d76916d11fa6d8b5813b7f2eada0edc8e35cee19a9cb80cfd8e1c54d +size 22833 diff --git a/data/2025/2504_05xxx/2504.05979/images/2e8258faee6378598f378a9a63923d7d64ed24c4a8d936ba6407f7a1752ce76a.jpg b/data/2025/2504_05xxx/2504.05979/images/2e8258faee6378598f378a9a63923d7d64ed24c4a8d936ba6407f7a1752ce76a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c3072778749903e016a7a0415c94948a48a4a7f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2e8258faee6378598f378a9a63923d7d64ed24c4a8d936ba6407f7a1752ce76a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a81c40bf787de7e412ec856e0b0bb9d00b77416915954027b68ae10d8685de5b +size 13157 diff --git a/data/2025/2504_05xxx/2504.05979/images/2e8e24d1cba72b5e1d3fa71c6329231f5130c1a2c84e08710a2bfcef26d50f3e.jpg b/data/2025/2504_05xxx/2504.05979/images/2e8e24d1cba72b5e1d3fa71c6329231f5130c1a2c84e08710a2bfcef26d50f3e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4138e78389123742a1020801d8ebc4bcd2cbe331 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2e8e24d1cba72b5e1d3fa71c6329231f5130c1a2c84e08710a2bfcef26d50f3e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02250654f45c9df734ad18c6a47804e6ffbd374125ee4fcab128272a272715e3 +size 9339 diff --git a/data/2025/2504_05xxx/2504.05979/images/2ea1368e7787c97c60d6731352894c9323ed501fb6dc4e2d3cb1aa2c697b91f0.jpg b/data/2025/2504_05xxx/2504.05979/images/2ea1368e7787c97c60d6731352894c9323ed501fb6dc4e2d3cb1aa2c697b91f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c93c6038c66ce157e22f38446f32d79f346622c3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2ea1368e7787c97c60d6731352894c9323ed501fb6dc4e2d3cb1aa2c697b91f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e52e97aaf11a88f8a2b878b7ebe964c61d79794452b13cad56f9087cecdc8d9 +size 8811 diff --git a/data/2025/2504_05xxx/2504.05979/images/2ed33745b63fd77dc89a8024a98963f50198ec8a1e52ceda4ab8062eb4b2085d.jpg b/data/2025/2504_05xxx/2504.05979/images/2ed33745b63fd77dc89a8024a98963f50198ec8a1e52ceda4ab8062eb4b2085d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7aecde92a78ee8db96c6f38be78456533fb170c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2ed33745b63fd77dc89a8024a98963f50198ec8a1e52ceda4ab8062eb4b2085d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:643c96dadcbf8117546349ce47140b5f86a56ca76e7f0a3a4aa541ca2ece5f6d +size 9581 diff --git a/data/2025/2504_05xxx/2504.05979/images/2f4b6a584e5347f2a2ffa4c55015f094171d881e87ced85fc577e90c71f0f4a9.jpg b/data/2025/2504_05xxx/2504.05979/images/2f4b6a584e5347f2a2ffa4c55015f094171d881e87ced85fc577e90c71f0f4a9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5ba03adaac8c5f30fa284a392e66fba96cfdf22 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2f4b6a584e5347f2a2ffa4c55015f094171d881e87ced85fc577e90c71f0f4a9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cfad5e62040b4cb988dde6e27be78f137ab0ee896d685bc66952d73283d44f2 +size 11816 diff --git a/data/2025/2504_05xxx/2504.05979/images/2f53f480f09632c6f3fce2f380303d7ddfff3462b4f02b3fbd217d4b98cd7bfb.jpg b/data/2025/2504_05xxx/2504.05979/images/2f53f480f09632c6f3fce2f380303d7ddfff3462b4f02b3fbd217d4b98cd7bfb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b66d31a850a61d0edeacaaac77c57c0e948146f4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2f53f480f09632c6f3fce2f380303d7ddfff3462b4f02b3fbd217d4b98cd7bfb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94a33a2516ca6f17b49b18fee5147de22db53b88797490509b1c172c18977241 +size 8100 diff --git a/data/2025/2504_05xxx/2504.05979/images/2f5b1c3c3a38c0ef1182e319f7bb487e17c8dec0cf207f9ddae29bd0055c93dc.jpg b/data/2025/2504_05xxx/2504.05979/images/2f5b1c3c3a38c0ef1182e319f7bb487e17c8dec0cf207f9ddae29bd0055c93dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6ac0ab9aea4df875c00295f86495a5530f7249e --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/2f5b1c3c3a38c0ef1182e319f7bb487e17c8dec0cf207f9ddae29bd0055c93dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7b848a690b669cbd474bc2bfceec0ed193db24b18181773c03f4d010051c6ea +size 12462 diff --git a/data/2025/2504_05xxx/2504.05979/images/2f732805f2b0034e45b6e06d068adc0a84b73d56f50fb84d71c1d98da5382f36.jpg b/data/2025/2504_05xxx/2504.05979/images/2f732805f2b0034e45b6e06d068adc0a84b73d56f50fb84d71c1d98da5382f36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4264c2db34e54a134494c171268174964a8270a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2f732805f2b0034e45b6e06d068adc0a84b73d56f50fb84d71c1d98da5382f36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67a0a93b5fa7bd3b752e89b5439247019512460e65b45cc4c05c2723a04d786d +size 10550 diff --git a/data/2025/2504_05xxx/2504.05979/images/2f8ba2525f180355e9429c8aa78eab3fdc5e1c1c77db35dd88729c6608f735e8.jpg b/data/2025/2504_05xxx/2504.05979/images/2f8ba2525f180355e9429c8aa78eab3fdc5e1c1c77db35dd88729c6608f735e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6bf2971e8cd5d84f38f5d653453c51edf5e907ce --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2f8ba2525f180355e9429c8aa78eab3fdc5e1c1c77db35dd88729c6608f735e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c736ad4f994d27020a2f0f7fe8db2967290c8314629d535f0f3d01bb17e33b6 +size 5118 diff --git a/data/2025/2504_05xxx/2504.05979/images/2f8dd6355dcb71362e42158e49ed0aac5ddd056ed4d4516867721782b9b2a0b2.jpg b/data/2025/2504_05xxx/2504.05979/images/2f8dd6355dcb71362e42158e49ed0aac5ddd056ed4d4516867721782b9b2a0b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a46187f221becfdd4bc3f455564e6d63f3196644 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2f8dd6355dcb71362e42158e49ed0aac5ddd056ed4d4516867721782b9b2a0b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8860feb506de70279a956f48d86a8b48ea50e542da0b03ebd2a94d774aa28a7 +size 6313 diff --git a/data/2025/2504_05xxx/2504.05979/images/2fc6af7cdb8c9f7e71021c537aa0aba9a3610880f2762857e8317da88256e3c0.jpg b/data/2025/2504_05xxx/2504.05979/images/2fc6af7cdb8c9f7e71021c537aa0aba9a3610880f2762857e8317da88256e3c0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..415cf31e8e99b8131c10461c9eedde234e2bc69c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/2fc6af7cdb8c9f7e71021c537aa0aba9a3610880f2762857e8317da88256e3c0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fdce8997e9b2397fcd87a0d107c983a6c4615b03554445a031e59573004bbeb +size 6809 diff --git a/data/2025/2504_05xxx/2504.05979/images/3015117dffd29de40979d63938f43194ed356ec24b893f0eca89a8e4d2397504.jpg b/data/2025/2504_05xxx/2504.05979/images/3015117dffd29de40979d63938f43194ed356ec24b893f0eca89a8e4d2397504.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b96b01fbdfe72f280565ce6ba3b3f34ae6c238b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3015117dffd29de40979d63938f43194ed356ec24b893f0eca89a8e4d2397504.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c47b6c24dae08e7efdd00b3f0cd7b1dc13c15802a747bceed292fe9fc394f652 +size 15683 diff --git a/data/2025/2504_05xxx/2504.05979/images/301c1384224e00b3fa898587c4f5578363c3b8189cb4bd2ba2d363bd9d31a9c8.jpg 
b/data/2025/2504_05xxx/2504.05979/images/301c1384224e00b3fa898587c4f5578363c3b8189cb4bd2ba2d363bd9d31a9c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dba3bd192e010cb5b3d022496937caa9954c41c6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/301c1384224e00b3fa898587c4f5578363c3b8189cb4bd2ba2d363bd9d31a9c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8417c6f70065ed30e16c70919ba46b41363d0aec2484c2528a5e57e3c869b2f6 +size 3008 diff --git a/data/2025/2504_05xxx/2504.05979/images/302a3887daa57a9516027cfc6473b7293f8f9c6bd1007fa32f38d00d07543191.jpg b/data/2025/2504_05xxx/2504.05979/images/302a3887daa57a9516027cfc6473b7293f8f9c6bd1007fa32f38d00d07543191.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7e0bff8b8c278438562c3b9d1d265f0ff12b3ff --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/302a3887daa57a9516027cfc6473b7293f8f9c6bd1007fa32f38d00d07543191.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a247e6b8ab39ad2c7fc1291f7824aff7544756e812b419557cba94f0f9ae2a9c +size 12384 diff --git a/data/2025/2504_05xxx/2504.05979/images/303aadcf1f413c53fe51a10f2bbdb1d2f38eaebbbf1bafbaf65caa46e66c8863.jpg b/data/2025/2504_05xxx/2504.05979/images/303aadcf1f413c53fe51a10f2bbdb1d2f38eaebbbf1bafbaf65caa46e66c8863.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1cf57b469e9560f80c2a2a1c0c8e5191304d158c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/303aadcf1f413c53fe51a10f2bbdb1d2f38eaebbbf1bafbaf65caa46e66c8863.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80e081590bb6707886018e69cb416d31ee5df1bee5e55e5129bc39a0bcbe6745 +size 7623 diff --git a/data/2025/2504_05xxx/2504.05979/images/30ed799a330abd36ec3b456792e50674b62123dda7d183a4909f198292186c06.jpg b/data/2025/2504_05xxx/2504.05979/images/30ed799a330abd36ec3b456792e50674b62123dda7d183a4909f198292186c06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb97d1f583883cf4597bb24098ca878789ebae05 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/30ed799a330abd36ec3b456792e50674b62123dda7d183a4909f198292186c06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b9a29d3d638d478fb692a2c502d676a2698db9429fb59d164325abf3588d470 +size 1009 diff --git a/data/2025/2504_05xxx/2504.05979/images/314ecf8346962cc266f1e67d8f88be07f8a8eaca1ca9d18ed756eea986ca6a35.jpg b/data/2025/2504_05xxx/2504.05979/images/314ecf8346962cc266f1e67d8f88be07f8a8eaca1ca9d18ed756eea986ca6a35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a1ffc95070862eee1adf6ec5b65257d7d625a3c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/314ecf8346962cc266f1e67d8f88be07f8a8eaca1ca9d18ed756eea986ca6a35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52b78d35deb4b98f6a39bfde5e3c8b259c331fc7452adb3a16698df568f6ac0d +size 13724 diff --git a/data/2025/2504_05xxx/2504.05979/images/324c435155e439e0e4927509fb4bc07e46d67b68706e04a95c81c2c43616dade.jpg b/data/2025/2504_05xxx/2504.05979/images/324c435155e439e0e4927509fb4bc07e46d67b68706e04a95c81c2c43616dade.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3057bf0aca941f9709ae26bd546963da51ddeee2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/324c435155e439e0e4927509fb4bc07e46d67b68706e04a95c81c2c43616dade.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b7f957267e54b5cf7e3bfb4beaf6c42e975e202a8de1986de4c9ae513bed57d8 +size 16957 diff --git a/data/2025/2504_05xxx/2504.05979/images/3270cef07702d259baaf487526f2ae6da683fab7cc7fbfd29b04b26044a58e69.jpg b/data/2025/2504_05xxx/2504.05979/images/3270cef07702d259baaf487526f2ae6da683fab7cc7fbfd29b04b26044a58e69.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9f82004479ecdd1b8d6c243dfb7b34687bcf8b6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3270cef07702d259baaf487526f2ae6da683fab7cc7fbfd29b04b26044a58e69.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2b5a93be1596fed2a8f0d1ebe3255eb65c94563e8b201d1c3a1327d784bd3eb +size 12362 diff --git a/data/2025/2504_05xxx/2504.05979/images/327aff3d754879bc07e69c135ac470cd76c2e5d5328c263cd495368496462207.jpg b/data/2025/2504_05xxx/2504.05979/images/327aff3d754879bc07e69c135ac470cd76c2e5d5328c263cd495368496462207.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1286cd89ec8b9983c16740d320c6b31688f7577b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/327aff3d754879bc07e69c135ac470cd76c2e5d5328c263cd495368496462207.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f46be5120c2ee9687c9e0f8c53ef09c4d7b6ee65b9fcb17d5e8c3c716b1f732d +size 8862 diff --git a/data/2025/2504_05xxx/2504.05979/images/32972f2863a4ed568d406e940ec73fb2acb89d05a9ce88264831494aca2909d5.jpg b/data/2025/2504_05xxx/2504.05979/images/32972f2863a4ed568d406e940ec73fb2acb89d05a9ce88264831494aca2909d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5158fc3225403df66a15168d6756998cdae16e3a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/32972f2863a4ed568d406e940ec73fb2acb89d05a9ce88264831494aca2909d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fc0908a9051b899af1e73dd236397d1980405a64f604e3721232d71c34549ef +size 7240 diff --git a/data/2025/2504_05xxx/2504.05979/images/32ffc1eb96999685c0b89d169c9aa5316abd2e06ff28b61ba0beef011cd7d160.jpg b/data/2025/2504_05xxx/2504.05979/images/32ffc1eb96999685c0b89d169c9aa5316abd2e06ff28b61ba0beef011cd7d160.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13be9a12a64e0d4ff56b6aaf39d4f0c0eefabfd9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/32ffc1eb96999685c0b89d169c9aa5316abd2e06ff28b61ba0beef011cd7d160.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f86865ac11fd1ec6e0f413f22c4c8e16293086335f0d23d7e07b2092a750b4d +size 8170 diff --git a/data/2025/2504_05xxx/2504.05979/images/334e57c72d615f0c16cf7f289ed3e1236adf82ed7e69d06552fc24a1910711db.jpg b/data/2025/2504_05xxx/2504.05979/images/334e57c72d615f0c16cf7f289ed3e1236adf82ed7e69d06552fc24a1910711db.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee9e815df24da1a0ddd63d6bab83e43647e33591 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/334e57c72d615f0c16cf7f289ed3e1236adf82ed7e69d06552fc24a1910711db.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3664ac731eadbc2c107d8d90ed419cdc0169c1148130c789f308b67c715e32e5 +size 5571 diff --git a/data/2025/2504_05xxx/2504.05979/images/3395f7e5893f7029588b9d5b16f3549b7c5baa04869e047e731f2d72c81f6981.jpg b/data/2025/2504_05xxx/2504.05979/images/3395f7e5893f7029588b9d5b16f3549b7c5baa04869e047e731f2d72c81f6981.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dcab6c29e8d510b684c58340de2e65d4cd3fb00e --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/3395f7e5893f7029588b9d5b16f3549b7c5baa04869e047e731f2d72c81f6981.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b5bf6759343626a6d08ee674348296963f319e76e73400a5a9332ed6ce92b6c +size 976 diff --git a/data/2025/2504_05xxx/2504.05979/images/33aeab41543cb1f610d264106b611a2050ccfaaf21583d7c7034d3b1ef94d142.jpg b/data/2025/2504_05xxx/2504.05979/images/33aeab41543cb1f610d264106b611a2050ccfaaf21583d7c7034d3b1ef94d142.jpg new file mode 100644 index 0000000000000000000000000000000000000000..540a499cbcca32e9cdbae44e787eaeda743eddb9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/33aeab41543cb1f610d264106b611a2050ccfaaf21583d7c7034d3b1ef94d142.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b733011e9985026e8b54ce73520b7c283005e01a7f356db7e9b2637635d20f27 +size 7300 diff --git a/data/2025/2504_05xxx/2504.05979/images/33b1124e10743638cb062747979a788b7ede97dab752dac0e2e8cac1db1a2516.jpg b/data/2025/2504_05xxx/2504.05979/images/33b1124e10743638cb062747979a788b7ede97dab752dac0e2e8cac1db1a2516.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd906d7009ab7a0515acc6228f493e1f337a8941 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/33b1124e10743638cb062747979a788b7ede97dab752dac0e2e8cac1db1a2516.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b272046ac735f8857eb8ae17ef121e43fa900cf2f1f9eec4e1f752a7370e73af +size 13754 diff --git a/data/2025/2504_05xxx/2504.05979/images/34364cd752d523f7253e2bed26e26e7bb958064d8b44d8df3b0c491d05214cb2.jpg b/data/2025/2504_05xxx/2504.05979/images/34364cd752d523f7253e2bed26e26e7bb958064d8b44d8df3b0c491d05214cb2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf5338bdd83bad596ad7c8b7710ffadcce12688d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/34364cd752d523f7253e2bed26e26e7bb958064d8b44d8df3b0c491d05214cb2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c23654feb36d35ebc48f3699cc31bf8860973ab8073b7ed9cc196994407efe86 +size 8183 diff --git a/data/2025/2504_05xxx/2504.05979/images/34545e79e79f3078a365a6cb3d938519fac2d3ed1ebd3db57368a178779167ae.jpg b/data/2025/2504_05xxx/2504.05979/images/34545e79e79f3078a365a6cb3d938519fac2d3ed1ebd3db57368a178779167ae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69883281f864d0cc9c89e8ab9f6f23857f653721 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/34545e79e79f3078a365a6cb3d938519fac2d3ed1ebd3db57368a178779167ae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c79ecfe4545d89bbc55c7ed8b5ed6d354897477882dc1a21f59ddb53ff55637d +size 8255 diff --git a/data/2025/2504_05xxx/2504.05979/images/345956a47d1ba18bc47ef420be35f2680ee92364e251cc4f0c85890f41d35a52.jpg b/data/2025/2504_05xxx/2504.05979/images/345956a47d1ba18bc47ef420be35f2680ee92364e251cc4f0c85890f41d35a52.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b1602a2d61fe093210989bb56e0d6e94e56237bf --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/345956a47d1ba18bc47ef420be35f2680ee92364e251cc4f0c85890f41d35a52.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db032b36386338e018fdcc19c795dcecfc662bf26a78262ffd1c437e85264097 +size 8298 diff --git a/data/2025/2504_05xxx/2504.05979/images/34eb588af3ad0d16e975cf94c0165c9ea9cdbab78610d9c4fea8f37457c30f62.jpg 
b/data/2025/2504_05xxx/2504.05979/images/34eb588af3ad0d16e975cf94c0165c9ea9cdbab78610d9c4fea8f37457c30f62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e941dbb78b327f5d10eaa7ac8665a4bf095081a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/34eb588af3ad0d16e975cf94c0165c9ea9cdbab78610d9c4fea8f37457c30f62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c3c9530c0d2dce8f892303e4f0066865fc23b08a6d8f1a4b62ae644d064d2f6 +size 34949 diff --git a/data/2025/2504_05xxx/2504.05979/images/34f3e69973949e582903e9064b4ccc8b319f75b359178b7208421be704e55d45.jpg b/data/2025/2504_05xxx/2504.05979/images/34f3e69973949e582903e9064b4ccc8b319f75b359178b7208421be704e55d45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13ff91fa8f98c970b0adc9a806c091e73b39abfd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/34f3e69973949e582903e9064b4ccc8b319f75b359178b7208421be704e55d45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd07d9df93715595cb914e030f7930a791b38b63a31ef0ae2cf27df6a5a20a05 +size 24889 diff --git a/data/2025/2504_05xxx/2504.05979/images/35081435a9dddead08f2e1591ed883e214e7ed45a52ef1bb2df34789a8f65f50.jpg b/data/2025/2504_05xxx/2504.05979/images/35081435a9dddead08f2e1591ed883e214e7ed45a52ef1bb2df34789a8f65f50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23e5f397c8152810c7b0db3b9c9f0b4cf6e8b76e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/35081435a9dddead08f2e1591ed883e214e7ed45a52ef1bb2df34789a8f65f50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06554685eebc99255f299c00a505e68a958832c6ae934c0e14aee6a476ef706a +size 9955 diff --git a/data/2025/2504_05xxx/2504.05979/images/3520766d4bd31fe95534ccef2679e0d3237151e64a06cfbdbcc03285013be219.jpg b/data/2025/2504_05xxx/2504.05979/images/3520766d4bd31fe95534ccef2679e0d3237151e64a06cfbdbcc03285013be219.jpg new file mode 100644 index 0000000000000000000000000000000000000000..458bb6107f8f5adcd3fd4faa62af5e35effc59ba --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3520766d4bd31fe95534ccef2679e0d3237151e64a06cfbdbcc03285013be219.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:425c27911637a17fb2cdadefd556e400ddcdb7bbaca249d60e82ca33e30a0ce5 +size 9568 diff --git a/data/2025/2504_05xxx/2504.05979/images/3569ee4edc461e7da1c8372a4a522d92cca61c99b430368708a6b7dc82703262.jpg b/data/2025/2504_05xxx/2504.05979/images/3569ee4edc461e7da1c8372a4a522d92cca61c99b430368708a6b7dc82703262.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c94c4fd477f733932108c7a80f46d02ca75f9a14 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3569ee4edc461e7da1c8372a4a522d92cca61c99b430368708a6b7dc82703262.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b430bb644f6efb75f790edd4611cb71c5c971c4ea80e73646b0c35ec7472e2d2 +size 5636 diff --git a/data/2025/2504_05xxx/2504.05979/images/359582282a17e20740a8e878147c496925396e8e780d4f8429b70706e235f66f.jpg b/data/2025/2504_05xxx/2504.05979/images/359582282a17e20740a8e878147c496925396e8e780d4f8429b70706e235f66f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..448f4b18ad607a4aeb1a40bc580bb13a725abfee --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/359582282a17e20740a8e878147c496925396e8e780d4f8429b70706e235f66f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:9419de027310332fabdb8175786ae203d75396fabd8a45e82a66a7ddec94fba6 +size 6664 diff --git a/data/2025/2504_05xxx/2504.05979/images/35b326fb2148865594e763630192efc498f76114d6db2844fc81e1f5810bf287.jpg b/data/2025/2504_05xxx/2504.05979/images/35b326fb2148865594e763630192efc498f76114d6db2844fc81e1f5810bf287.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ae805aad807675eba0cb3206fd298ffd110a649 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/35b326fb2148865594e763630192efc498f76114d6db2844fc81e1f5810bf287.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:870ab4d4828584e0779043af0ca2e5975492e7862af54a838cd49bbc5dc69c7a +size 906 diff --git a/data/2025/2504_05xxx/2504.05979/images/36374494cf58c80bf10d518e18df6bb7c26dfed3007927ce329edde282b717d7.jpg b/data/2025/2504_05xxx/2504.05979/images/36374494cf58c80bf10d518e18df6bb7c26dfed3007927ce329edde282b717d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee6a5ee371061374bedf188dce345cee516e32ce --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/36374494cf58c80bf10d518e18df6bb7c26dfed3007927ce329edde282b717d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac704c52f151d0bef9fd8dad25936541bbf2502123dc87f8b4eea2e96ecdd7e6 +size 7767 diff --git a/data/2025/2504_05xxx/2504.05979/images/36a0b0bce8ab290695f007ef9c9240f85222009951f43efd16512c909c48a75e.jpg b/data/2025/2504_05xxx/2504.05979/images/36a0b0bce8ab290695f007ef9c9240f85222009951f43efd16512c909c48a75e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aebe6ba404457b04206007c5e3a4d88e32a631f3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/36a0b0bce8ab290695f007ef9c9240f85222009951f43efd16512c909c48a75e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69d3d3c87abebb86afcfb2e916831ab540a4269ee35c9c34d79bcbcf8730fdbe +size 10254 diff --git a/data/2025/2504_05xxx/2504.05979/images/36ad8877a81602152fef02eb279de3452a2d3bcd29e3cca2508e6eb27da3dd21.jpg b/data/2025/2504_05xxx/2504.05979/images/36ad8877a81602152fef02eb279de3452a2d3bcd29e3cca2508e6eb27da3dd21.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a1113432c58546b8bf2e3254001a61502b3d5f5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/36ad8877a81602152fef02eb279de3452a2d3bcd29e3cca2508e6eb27da3dd21.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d7a7f01d0b02e1e9c36f3ec374291ab2a773cce1f527e226bedda4dd47dd038 +size 3334 diff --git a/data/2025/2504_05xxx/2504.05979/images/36ba8480160ce7a11902f9d63c93a779ad81abc66d85c984fe46a402a46397ff.jpg b/data/2025/2504_05xxx/2504.05979/images/36ba8480160ce7a11902f9d63c93a779ad81abc66d85c984fe46a402a46397ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..44a116a2b3456b08da6fdf83b36a676c6c1ffe5b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/36ba8480160ce7a11902f9d63c93a779ad81abc66d85c984fe46a402a46397ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50af66e647279297af99ece10f563bbe26362ff3c3c767de4557ed9fb1306621 +size 15678 diff --git a/data/2025/2504_05xxx/2504.05979/images/3745e5cc44c159dbc30c05e6abfee39434906086aa082386ab669e6a3800719e.jpg b/data/2025/2504_05xxx/2504.05979/images/3745e5cc44c159dbc30c05e6abfee39434906086aa082386ab669e6a3800719e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f7b765dfbce9eafd7f158474b5b04e569b3b715 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/3745e5cc44c159dbc30c05e6abfee39434906086aa082386ab669e6a3800719e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99573116826167e4de3c08a8a8867de336e74c7ed6c6d2e80d09dd09bb7a498f +size 11578 diff --git a/data/2025/2504_05xxx/2504.05979/images/37c0d9d0ba1eed41834771a2d8df3690d2b37294a08a5844702161e53aa817ab.jpg b/data/2025/2504_05xxx/2504.05979/images/37c0d9d0ba1eed41834771a2d8df3690d2b37294a08a5844702161e53aa817ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..370641c28302be463605d5d63446e3fe1258fb95 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/37c0d9d0ba1eed41834771a2d8df3690d2b37294a08a5844702161e53aa817ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:521228443c03af610462b3a15fb40fcbca5983c3a7e4703393ebdb9e1a176d11 +size 22137 diff --git a/data/2025/2504_05xxx/2504.05979/images/37e0cb1ddf2d5338758f440584a48a5f50fe2d534e9b2486a05a7690ed334ed9.jpg b/data/2025/2504_05xxx/2504.05979/images/37e0cb1ddf2d5338758f440584a48a5f50fe2d534e9b2486a05a7690ed334ed9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d440c95b2f892cf3e23aac0cf822c69ddc512503 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/37e0cb1ddf2d5338758f440584a48a5f50fe2d534e9b2486a05a7690ed334ed9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e230fb3a47beac2abcf49d9046ab486c0d2ee469b1ed9a55cc20049e3cb75cd9 +size 15197 diff --git a/data/2025/2504_05xxx/2504.05979/images/37feb29d085233a01159a45ae5e03d6b57ff8f11c715ca55b4a77dfee3fc645b.jpg b/data/2025/2504_05xxx/2504.05979/images/37feb29d085233a01159a45ae5e03d6b57ff8f11c715ca55b4a77dfee3fc645b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6fe028980fd38a0a99981b64409245f6b356ba65 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/37feb29d085233a01159a45ae5e03d6b57ff8f11c715ca55b4a77dfee3fc645b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:094d7beef12ceb26713af56bc2504b260bf5aeffce0d8e558aad8eab6e6a3e9f +size 9562 diff --git a/data/2025/2504_05xxx/2504.05979/images/382030f3e3f0298bca0a14e5cda55ee5859de8f05e70a6182873cc5b53987e77.jpg b/data/2025/2504_05xxx/2504.05979/images/382030f3e3f0298bca0a14e5cda55ee5859de8f05e70a6182873cc5b53987e77.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94a089b27757500d5a880d79bc84c26bc3c3f3fa --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/382030f3e3f0298bca0a14e5cda55ee5859de8f05e70a6182873cc5b53987e77.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac3e65bd3478523a0a293b748bfeb5953e1870f9a72baa85acaab02bf1929060 +size 21144 diff --git a/data/2025/2504_05xxx/2504.05979/images/386ab41549ccbbdcb6953f2cfb0c22f29b59c77cb08a11e2d82aaf042ab0a3a4.jpg b/data/2025/2504_05xxx/2504.05979/images/386ab41549ccbbdcb6953f2cfb0c22f29b59c77cb08a11e2d82aaf042ab0a3a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4bb3af3b8dd64fb7541d661cef36b8fe7a58c337 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/386ab41549ccbbdcb6953f2cfb0c22f29b59c77cb08a11e2d82aaf042ab0a3a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:110e0a6531958c3fe48577d7cb0b5505de9a49960daa1aafa8d4cc0f4f2510aa +size 12092 diff --git a/data/2025/2504_05xxx/2504.05979/images/38e332ce3a2562225f948c109c071db065296fc2fbfae678e295cb014e1ab37d.jpg 
b/data/2025/2504_05xxx/2504.05979/images/38e332ce3a2562225f948c109c071db065296fc2fbfae678e295cb014e1ab37d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75f9d7f814aa6f1f4e6a4d3df9cb22f22e945661 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/38e332ce3a2562225f948c109c071db065296fc2fbfae678e295cb014e1ab37d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c204d44049111d724e3cb4aeebc4d10d448fe0dcf513d5b87203c8390b7a822 +size 5783 diff --git a/data/2025/2504_05xxx/2504.05979/images/391fb13f1c421b143115f7495cad2522d8d46310cd74bbe3fd2d5b33986486e1.jpg b/data/2025/2504_05xxx/2504.05979/images/391fb13f1c421b143115f7495cad2522d8d46310cd74bbe3fd2d5b33986486e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc967cfd6c55efe924d692b7ca93857a71282ffe --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/391fb13f1c421b143115f7495cad2522d8d46310cd74bbe3fd2d5b33986486e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98ad1f60f23f1ef3f608eaa8ae54ae0fa54afdda980431b97376515d4208e523 +size 10358 diff --git a/data/2025/2504_05xxx/2504.05979/images/396cc4efe9e599f8f4c4b1bad0da4de3244d5eeed20ffeb09471237218d5adde.jpg b/data/2025/2504_05xxx/2504.05979/images/396cc4efe9e599f8f4c4b1bad0da4de3244d5eeed20ffeb09471237218d5adde.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a1091b75968f385d53ee28e439093e5e4916824 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/396cc4efe9e599f8f4c4b1bad0da4de3244d5eeed20ffeb09471237218d5adde.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e25158ac1c7c77c5e3182fae057febe5f6ec610f0c126cfbefab062be2b82f1 +size 10553 diff --git a/data/2025/2504_05xxx/2504.05979/images/3979db3e010b09be23fdd79665b9946d9fa4adc89d8766528d7afd7b653cf603.jpg b/data/2025/2504_05xxx/2504.05979/images/3979db3e010b09be23fdd79665b9946d9fa4adc89d8766528d7afd7b653cf603.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4da909b6da593e3b9bae2bef6b81546fa1126f9c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3979db3e010b09be23fdd79665b9946d9fa4adc89d8766528d7afd7b653cf603.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02405fe68af84bbdb8b03e547a3af90a8b8b8ba1f1e1f7dfe86731bafe639bf1 +size 8914 diff --git a/data/2025/2504_05xxx/2504.05979/images/39860355d35a8389c1208ff488e0389da02ec58d4919778ab96d9ee07712b2fd.jpg b/data/2025/2504_05xxx/2504.05979/images/39860355d35a8389c1208ff488e0389da02ec58d4919778ab96d9ee07712b2fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..72ac022d838296fd0f7bb2712e99608d0f4bbbbb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/39860355d35a8389c1208ff488e0389da02ec58d4919778ab96d9ee07712b2fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb7e30807809620a787b263934f5ac9d086bdd0a786072048c5e1c1e2e9dea51 +size 5481 diff --git a/data/2025/2504_05xxx/2504.05979/images/3986b51693c8099322835c4b144aa50d43241eafeb2e294ac53df6b06556a42b.jpg b/data/2025/2504_05xxx/2504.05979/images/3986b51693c8099322835c4b144aa50d43241eafeb2e294ac53df6b06556a42b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef89c266f9707045c507694de50f3c19af972e56 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3986b51693c8099322835c4b144aa50d43241eafeb2e294ac53df6b06556a42b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:90662ada7b6cfcea88ecb534de24267621f64fc5620e417002a07d55563106d3 +size 11677 diff --git a/data/2025/2504_05xxx/2504.05979/images/39a867d5c76eb0ae5b81b696fa69ab0660b5b5250864e6b56ff6616c1d01cf18.jpg b/data/2025/2504_05xxx/2504.05979/images/39a867d5c76eb0ae5b81b696fa69ab0660b5b5250864e6b56ff6616c1d01cf18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b3b88a3224ad717057bd6df7868f91f3d2523b60 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/39a867d5c76eb0ae5b81b696fa69ab0660b5b5250864e6b56ff6616c1d01cf18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:196cad14d44bd1c0ddc3e4af032e577f076cdb38dc962dbb5d61f04b1d12336e +size 8412 diff --git a/data/2025/2504_05xxx/2504.05979/images/3a21a982cd81980080fc0bfe53df60869cb5c77ea93c8479b28806aab9298ca1.jpg b/data/2025/2504_05xxx/2504.05979/images/3a21a982cd81980080fc0bfe53df60869cb5c77ea93c8479b28806aab9298ca1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ecd80c0525f273c8f1926bfe197142edf60e3040 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3a21a982cd81980080fc0bfe53df60869cb5c77ea93c8479b28806aab9298ca1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c66b083c433c8fe5c868362a5c10e3f783eb74e8e863ed7f334f74e5dabb24f +size 11058 diff --git a/data/2025/2504_05xxx/2504.05979/images/3aca5dc86dd3946c68be0172061baa23975fed2075ecacb889e265f89089a1e4.jpg b/data/2025/2504_05xxx/2504.05979/images/3aca5dc86dd3946c68be0172061baa23975fed2075ecacb889e265f89089a1e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34a7c470fdea0842323f52df3ae841114956fd45 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3aca5dc86dd3946c68be0172061baa23975fed2075ecacb889e265f89089a1e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:680678ae8a1ef917887612785566fd81193a1ce22c05b0b1061a30588348d23d +size 12889 diff --git a/data/2025/2504_05xxx/2504.05979/images/3ae48117c4aba996216a7bc491ef96a7ef76812eb31cfce77a2b900f543504dc.jpg b/data/2025/2504_05xxx/2504.05979/images/3ae48117c4aba996216a7bc491ef96a7ef76812eb31cfce77a2b900f543504dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccb89687a00d9181074d641d790bbc4cb71d9a8b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3ae48117c4aba996216a7bc491ef96a7ef76812eb31cfce77a2b900f543504dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0af274d10fe27ba8e883b03f6ca4032d91f116168a4f80f956339157b8c92663 +size 14248 diff --git a/data/2025/2504_05xxx/2504.05979/images/3b08ebeb2ba10b1056a1d7752ce27e2b4990f6b98a2f7acdbabcd32480037452.jpg b/data/2025/2504_05xxx/2504.05979/images/3b08ebeb2ba10b1056a1d7752ce27e2b4990f6b98a2f7acdbabcd32480037452.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f7935fd5875e0e4fcdeb78290a7d2df3a7f0d34 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3b08ebeb2ba10b1056a1d7752ce27e2b4990f6b98a2f7acdbabcd32480037452.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7476d0b6527515e4f7d8e8bd7692e683ddd31c55796da4ebeed66c62c383574 +size 16316 diff --git a/data/2025/2504_05xxx/2504.05979/images/3b637fcfa8060bbf723d957b7f4a1aa49a5f9d87c744993d0514d9873e23989e.jpg b/data/2025/2504_05xxx/2504.05979/images/3b637fcfa8060bbf723d957b7f4a1aa49a5f9d87c744993d0514d9873e23989e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..047520d7ce2b0af87e7fb9451d6de24684fca820 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/3b637fcfa8060bbf723d957b7f4a1aa49a5f9d87c744993d0514d9873e23989e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5408abcbca4210551992d44d3a0cb3a508058d5eeba83ee0cf8d7a782389d226 +size 8499 diff --git a/data/2025/2504_05xxx/2504.05979/images/3bffa6f0cb5545336d1dece3985a3fcaff2ad60da5c231db47b90005c63f4ceb.jpg b/data/2025/2504_05xxx/2504.05979/images/3bffa6f0cb5545336d1dece3985a3fcaff2ad60da5c231db47b90005c63f4ceb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70bfcc263be103ee7ed58dd1895abcb17eb73783 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3bffa6f0cb5545336d1dece3985a3fcaff2ad60da5c231db47b90005c63f4ceb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6abf98116af8524d26c32565c2c17e4f8113d4d4c14f0a4db61ef7717fd9f10 +size 13142 diff --git a/data/2025/2504_05xxx/2504.05979/images/3c2edc361f25ec06384618fcf44a118efef67ed2c14c3a957eec1c7c432abf66.jpg b/data/2025/2504_05xxx/2504.05979/images/3c2edc361f25ec06384618fcf44a118efef67ed2c14c3a957eec1c7c432abf66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13a85422fb5bdb918f5cc55f416876dd1eb70144 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3c2edc361f25ec06384618fcf44a118efef67ed2c14c3a957eec1c7c432abf66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c0939ca71c5f9ca975714c2fc519c23bbe971b012b03cee7b938dbf9bb25027 +size 11909 diff --git a/data/2025/2504_05xxx/2504.05979/images/3dbfbe5de303b95e93930efc7c11ef12558704eeaae8642a301b7dbc59420786.jpg b/data/2025/2504_05xxx/2504.05979/images/3dbfbe5de303b95e93930efc7c11ef12558704eeaae8642a301b7dbc59420786.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ed2da4fbfc8c304201ca0d137a6f2275aa4195f5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3dbfbe5de303b95e93930efc7c11ef12558704eeaae8642a301b7dbc59420786.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9ddf12e034fc7bce1adbbf3b09e1b9e562fa9bac7eb601e8f5cbaa8ba3dde5b +size 5056 diff --git a/data/2025/2504_05xxx/2504.05979/images/3f74eb072bab2b800151c5b569b4d234ff14848a96ea3fa56d91a676cf3c70bf.jpg b/data/2025/2504_05xxx/2504.05979/images/3f74eb072bab2b800151c5b569b4d234ff14848a96ea3fa56d91a676cf3c70bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2abcb15048f78b556137d13b3b64a264691593d6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3f74eb072bab2b800151c5b569b4d234ff14848a96ea3fa56d91a676cf3c70bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21654e82e9c10410600cd2433ceb8e2ad00139b43b0662b8d84e7feeb0aaca89 +size 5692 diff --git a/data/2025/2504_05xxx/2504.05979/images/3fae030462e9df055665edfa8c3602ad144dba999e49b471e3dfd45d576c435a.jpg b/data/2025/2504_05xxx/2504.05979/images/3fae030462e9df055665edfa8c3602ad144dba999e49b471e3dfd45d576c435a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16bfd8c642e70a5258bef01866f9b9955cbd25e5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3fae030462e9df055665edfa8c3602ad144dba999e49b471e3dfd45d576c435a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:233327eb24d506100ddcdd7035093ace5288319846b928c95dafc7ba93417676 +size 12886 diff --git a/data/2025/2504_05xxx/2504.05979/images/3fda08a0f5b089580c34cfd81ff50f99e0d4660564c4d9f7d8ce3de3d5c373c0.jpg 
b/data/2025/2504_05xxx/2504.05979/images/3fda08a0f5b089580c34cfd81ff50f99e0d4660564c4d9f7d8ce3de3d5c373c0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa8ba8abe764254ca5a47ab2d788961749a467cd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3fda08a0f5b089580c34cfd81ff50f99e0d4660564c4d9f7d8ce3de3d5c373c0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a319bc00621fa7b1934791c225275423f2632da7aa71fa1cc467cbccd8c2f238 +size 8815 diff --git a/data/2025/2504_05xxx/2504.05979/images/3fe16c86a896d908033e9f78ddd28e8ff19dae54bfcbf58eef865c9f9147a115.jpg b/data/2025/2504_05xxx/2504.05979/images/3fe16c86a896d908033e9f78ddd28e8ff19dae54bfcbf58eef865c9f9147a115.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d4193ff6143662341635559aadda4d19836dd9a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/3fe16c86a896d908033e9f78ddd28e8ff19dae54bfcbf58eef865c9f9147a115.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57c19364c53820d583afa5cbd567c886acfb6a7cbb6a3dd95497b67017d63eab +size 7114 diff --git a/data/2025/2504_05xxx/2504.05979/images/40c5465d2843a6e6f41aaecc277b509f4346d85fd64000b3badbfe3d66eb079e.jpg b/data/2025/2504_05xxx/2504.05979/images/40c5465d2843a6e6f41aaecc277b509f4346d85fd64000b3badbfe3d66eb079e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa7e929b5f6469f96afa74f048856d0a298a0270 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/40c5465d2843a6e6f41aaecc277b509f4346d85fd64000b3badbfe3d66eb079e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da0137a360c1732b9156c6bff388126b822a27a7e07afcbf72e14872c304e0b6 +size 11157 diff --git a/data/2025/2504_05xxx/2504.05979/images/40eaf0cc2887a61189056995988a25e411abbb492721dd306939eb435cb2c205.jpg b/data/2025/2504_05xxx/2504.05979/images/40eaf0cc2887a61189056995988a25e411abbb492721dd306939eb435cb2c205.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e750a6c6f13001014adb42d6bb1352e0929e639 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/40eaf0cc2887a61189056995988a25e411abbb492721dd306939eb435cb2c205.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b1e27fbef32abbd24e4bc8fd3165dfd2ee2bc2eba6e00e768938d3e8da89b40 +size 14673 diff --git a/data/2025/2504_05xxx/2504.05979/images/40f481609f3f72c80c1bb3a1911a2bcc98e71fe948326192df5cb8ed5f01b0a6.jpg b/data/2025/2504_05xxx/2504.05979/images/40f481609f3f72c80c1bb3a1911a2bcc98e71fe948326192df5cb8ed5f01b0a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f768ee9a4e7c2823fee9c6aaa3eead84e8cd8a11 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/40f481609f3f72c80c1bb3a1911a2bcc98e71fe948326192df5cb8ed5f01b0a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f4619dc8216efbd8f12bd19f5fbc12d0d87e676523ed9765d67afdca619e6f0 +size 4820 diff --git a/data/2025/2504_05xxx/2504.05979/images/40ffe89df9c50ac3b8034fed7000e9df015d18b39c605d05fc2cb755261800bb.jpg b/data/2025/2504_05xxx/2504.05979/images/40ffe89df9c50ac3b8034fed7000e9df015d18b39c605d05fc2cb755261800bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..493e80efc2cafcbcd9a32ea96be82545d04f47df --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/40ffe89df9c50ac3b8034fed7000e9df015d18b39c605d05fc2cb755261800bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7d5731764769ebf3d161a715b747ff7afc204c480861f2f952129c758f58dd87 +size 8081 diff --git a/data/2025/2504_05xxx/2504.05979/images/413f16191d537cd336a6645867fb4044e2d27954d8e97a6f61443fdadd988968.jpg b/data/2025/2504_05xxx/2504.05979/images/413f16191d537cd336a6645867fb4044e2d27954d8e97a6f61443fdadd988968.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8241806ebbeaab05812146b1ea1603dd5ad2c6d1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/413f16191d537cd336a6645867fb4044e2d27954d8e97a6f61443fdadd988968.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0811bd80d2de94819b5daa8ff08531ab15d47a1305c512361c99a64e7b99b6e +size 10923 diff --git a/data/2025/2504_05xxx/2504.05979/images/41cf37136782a8224523753684cab11c0ddb36c12dfd39ee89ac851285853af2.jpg b/data/2025/2504_05xxx/2504.05979/images/41cf37136782a8224523753684cab11c0ddb36c12dfd39ee89ac851285853af2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d323cd2fcbb269695846c229e5a959cde2c52557 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/41cf37136782a8224523753684cab11c0ddb36c12dfd39ee89ac851285853af2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2cd2fab400ef930a72d1a326d1dacafa4e754dd53078f054b502b390e02c8ef +size 909 diff --git a/data/2025/2504_05xxx/2504.05979/images/41dc891dc5a7c3692e3d168e6b8d7aeecf6ee926ceb54032fac4c7482b335993.jpg b/data/2025/2504_05xxx/2504.05979/images/41dc891dc5a7c3692e3d168e6b8d7aeecf6ee926ceb54032fac4c7482b335993.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f912bda6cfd029b7210c8e25542214a52f02c919 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/41dc891dc5a7c3692e3d168e6b8d7aeecf6ee926ceb54032fac4c7482b335993.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17693f62cb3dc1bff580f2d191a25cdc2439a4cd52c7480737eb3fc55bd85f20 +size 10403 diff --git a/data/2025/2504_05xxx/2504.05979/images/41fa18b12458faa6a8dc00ecb9ddebef31263254df2985d4bd17d39f0aee636c.jpg b/data/2025/2504_05xxx/2504.05979/images/41fa18b12458faa6a8dc00ecb9ddebef31263254df2985d4bd17d39f0aee636c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4f16385479386c8b342e6c33cdc3acd16c8b8c8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/41fa18b12458faa6a8dc00ecb9ddebef31263254df2985d4bd17d39f0aee636c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2ad76691c42eb43ea5516c4ea566507d22aa2f58486eef96e440f0468f33534 +size 12404 diff --git a/data/2025/2504_05xxx/2504.05979/images/425b5ab97efea03f5a9f305b7eb62c794a077f423e347f1fbc1c194faca7ed85.jpg b/data/2025/2504_05xxx/2504.05979/images/425b5ab97efea03f5a9f305b7eb62c794a077f423e347f1fbc1c194faca7ed85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..824ca3254949a9eae6a47d9cc1b0596fedda6ab4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/425b5ab97efea03f5a9f305b7eb62c794a077f423e347f1fbc1c194faca7ed85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:429a889dd3d492f7865005da805cce4038bdf186a3edbc1c16b308b5b1310554 +size 9988 diff --git a/data/2025/2504_05xxx/2504.05979/images/4269989ea478ceccbce68d7fb7a7ad5b3bda0673dc3e29b9f2ccd7951d3cd19a.jpg b/data/2025/2504_05xxx/2504.05979/images/4269989ea478ceccbce68d7fb7a7ad5b3bda0673dc3e29b9f2ccd7951d3cd19a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c1178d9a13771efc065ad2645aa27b562831cd83 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/4269989ea478ceccbce68d7fb7a7ad5b3bda0673dc3e29b9f2ccd7951d3cd19a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce2701b9cc087ef9e6a71a1f1ab181a3e69956f7b3439bdda23f209093587060 +size 8993 diff --git a/data/2025/2504_05xxx/2504.05979/images/4425fde2f8933a0d0a27e3c8934be18f51d1e042682ca256127948d03c7d8026.jpg b/data/2025/2504_05xxx/2504.05979/images/4425fde2f8933a0d0a27e3c8934be18f51d1e042682ca256127948d03c7d8026.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd057290fe496302f6ea0e341fc7ef4affed4f4c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4425fde2f8933a0d0a27e3c8934be18f51d1e042682ca256127948d03c7d8026.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cb1f83eb0e01116cbd3c5d57febe4ca9da59aa820ff2f1b10f1c1d868faeffb +size 9004 diff --git a/data/2025/2504_05xxx/2504.05979/images/448a4c7fafc6c14e57593c609ff7476f3fc55ea22624bc38b74099fe39fc507a.jpg b/data/2025/2504_05xxx/2504.05979/images/448a4c7fafc6c14e57593c609ff7476f3fc55ea22624bc38b74099fe39fc507a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e606b2e55ea803acad3b45830e48f2d67ce0aad8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/448a4c7fafc6c14e57593c609ff7476f3fc55ea22624bc38b74099fe39fc507a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7202df318218d92695cbd68d1c58456c771abb04b18f0451315a8290f6002f5 +size 10823 diff --git a/data/2025/2504_05xxx/2504.05979/images/44a15d6fa321c083f59b0c39c8f37dd0512f34e39c60f7e70976d6279609306a.jpg b/data/2025/2504_05xxx/2504.05979/images/44a15d6fa321c083f59b0c39c8f37dd0512f34e39c60f7e70976d6279609306a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d5d4d2c1aaa715f99a52ac217c271e5700f120a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/44a15d6fa321c083f59b0c39c8f37dd0512f34e39c60f7e70976d6279609306a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd530057aab93b8784836451d966083deaea70ba6923498e149bdfb56d13ed37 +size 14548 diff --git a/data/2025/2504_05xxx/2504.05979/images/44a28c987decc43c2c7016a554b05881a1fabeb9cb87f073992406fc3bf4eeb3.jpg b/data/2025/2504_05xxx/2504.05979/images/44a28c987decc43c2c7016a554b05881a1fabeb9cb87f073992406fc3bf4eeb3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01065c537e5b4b930875fae1fada045a1ea89168 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/44a28c987decc43c2c7016a554b05881a1fabeb9cb87f073992406fc3bf4eeb3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32160c9b9597f8f531205f3b57fac1cc2665bfb1d45c5ddee11fcfecf926f768 +size 14529 diff --git a/data/2025/2504_05xxx/2504.05979/images/452eb4a0a98c3bffd9b5ca2149205d5cd34827599c8caa097cbd6d949a07b14c.jpg b/data/2025/2504_05xxx/2504.05979/images/452eb4a0a98c3bffd9b5ca2149205d5cd34827599c8caa097cbd6d949a07b14c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8e8e7b9fe679d29c94332079e4819bc4c0d6658 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/452eb4a0a98c3bffd9b5ca2149205d5cd34827599c8caa097cbd6d949a07b14c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c3997825c987157dba670b1298fce3af16178e08a517c038367472f14ed687a +size 6689 diff --git a/data/2025/2504_05xxx/2504.05979/images/454c99016219342e261335a7e82a9c5e7a05daa487fd74aa7d33328c05c7b4b4.jpg 
b/data/2025/2504_05xxx/2504.05979/images/454c99016219342e261335a7e82a9c5e7a05daa487fd74aa7d33328c05c7b4b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6186933dcfc9bf180c64159516b462a1a7e8d0e3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/454c99016219342e261335a7e82a9c5e7a05daa487fd74aa7d33328c05c7b4b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:febe8bad5d1d6dc11ed792134cfaf7496b77ceb0329386e6d23c27d7c2def377 +size 12198 diff --git a/data/2025/2504_05xxx/2504.05979/images/45547d6bbb7a37e719ea603008ab1f1389806043fdae0a8ed425ac866ed4b27c.jpg b/data/2025/2504_05xxx/2504.05979/images/45547d6bbb7a37e719ea603008ab1f1389806043fdae0a8ed425ac866ed4b27c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6740ad6c0e5ba70b18d113b0940c82b1b7ae56ea --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/45547d6bbb7a37e719ea603008ab1f1389806043fdae0a8ed425ac866ed4b27c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2fce7cec04dd0c882e33cfb77634bee97df26c35a2ff528722e2a9d1a4ac838 +size 14399 diff --git a/data/2025/2504_05xxx/2504.05979/images/45630d882ee83bb0a62e1013d838d630afca2a25e99127c4ab251b2d79d6652d.jpg b/data/2025/2504_05xxx/2504.05979/images/45630d882ee83bb0a62e1013d838d630afca2a25e99127c4ab251b2d79d6652d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5dab0bb5a9517a2d747738cf8c5e4ede5f018f7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/45630d882ee83bb0a62e1013d838d630afca2a25e99127c4ab251b2d79d6652d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0c0d2edf027943e5b6156b7c2d4439d19f1175a9a5d3e6e3ba2e39d6a4aec67 +size 10215 diff --git a/data/2025/2504_05xxx/2504.05979/images/4569221e0b0de61258bfb9ba425fbed4cda37201c120d48edf92be4e3bff1d5c.jpg b/data/2025/2504_05xxx/2504.05979/images/4569221e0b0de61258bfb9ba425fbed4cda37201c120d48edf92be4e3bff1d5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d785cf71e6c7560ce258cee30741a1c32736a40 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4569221e0b0de61258bfb9ba425fbed4cda37201c120d48edf92be4e3bff1d5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b3aa0ff55b84bede45053f4bb0a593cf7ca38d79de8f5a11ba42161d59090aa +size 16782 diff --git a/data/2025/2504_05xxx/2504.05979/images/45c315f9d4b26f49ce32764e260ffb24ab385bf338d6917e2d37d6b26b242016.jpg b/data/2025/2504_05xxx/2504.05979/images/45c315f9d4b26f49ce32764e260ffb24ab385bf338d6917e2d37d6b26b242016.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b2f3c7ca9dcfe570e0a166b2973d2e8b5792abd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/45c315f9d4b26f49ce32764e260ffb24ab385bf338d6917e2d37d6b26b242016.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27ebc39e88fa0dd830082a350e3b263992e46d176b71962dd7ea2c9637e9d2dc +size 5246 diff --git a/data/2025/2504_05xxx/2504.05979/images/46017199672cdc547f9700dfe377960c656d99e3ef6cc597d38c0dbafddd087b.jpg b/data/2025/2504_05xxx/2504.05979/images/46017199672cdc547f9700dfe377960c656d99e3ef6cc597d38c0dbafddd087b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41719851c4afe5e97e5aafc356a13fec6cb840a0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/46017199672cdc547f9700dfe377960c656d99e3ef6cc597d38c0dbafddd087b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:abd46be5278b832a59c6cea236a3214ff8f4e3f832ab6e9efeef12ef4b1ed998 +size 7786 diff --git a/data/2025/2504_05xxx/2504.05979/images/460d34550756a1743afe1027e9058e86963ba9586255d860b0f2b1c270b9280e.jpg b/data/2025/2504_05xxx/2504.05979/images/460d34550756a1743afe1027e9058e86963ba9586255d860b0f2b1c270b9280e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34e99f3ff433f0e826a047bd49a6d071acd7bc3a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/460d34550756a1743afe1027e9058e86963ba9586255d860b0f2b1c270b9280e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80b0ed33bae0c8b4125c6d9d1e272be377d0832360b23ebc6e474feed0d9d48e +size 12560 diff --git a/data/2025/2504_05xxx/2504.05979/images/4610ae5599e3f85d65d10713443204f4af850e0751404e619cfc018f179db18b.jpg b/data/2025/2504_05xxx/2504.05979/images/4610ae5599e3f85d65d10713443204f4af850e0751404e619cfc018f179db18b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f20e7e040cc1c7bf5895bfba785cf6f2c16d5780 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4610ae5599e3f85d65d10713443204f4af850e0751404e619cfc018f179db18b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:207c46a16c44be47cd0fac1b3dbecae142f81cfada8f9ce02f3544c42ae78710 +size 8225 diff --git a/data/2025/2504_05xxx/2504.05979/images/461d26dcff028be557e2efbcb1789125eb28fa946c8494b2dc6491aefc57fa30.jpg b/data/2025/2504_05xxx/2504.05979/images/461d26dcff028be557e2efbcb1789125eb28fa946c8494b2dc6491aefc57fa30.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0da577a4fe85e939ff98e090684f4272ad47379f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/461d26dcff028be557e2efbcb1789125eb28fa946c8494b2dc6491aefc57fa30.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19b27e72595f62cfd060565c0b59bfbd92683d447d9abd7f60ff1969392e3173 +size 3298 diff --git a/data/2025/2504_05xxx/2504.05979/images/46b577cbc8feb431b513caaf0fa800f1004a3c0cb4738f315c72a97f38bb97c4.jpg b/data/2025/2504_05xxx/2504.05979/images/46b577cbc8feb431b513caaf0fa800f1004a3c0cb4738f315c72a97f38bb97c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd2399b156780836aa0538aa84ad95357faf4055 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/46b577cbc8feb431b513caaf0fa800f1004a3c0cb4738f315c72a97f38bb97c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d417e3f4c4ff5c31d8dee97c63105d6056bc9ebbca3b1c464f43a31f1645f995 +size 18287 diff --git a/data/2025/2504_05xxx/2504.05979/images/46dec341162765728e1fc8f0cd002ebf4ee74f97cd953e898c4270f10f2a5328.jpg b/data/2025/2504_05xxx/2504.05979/images/46dec341162765728e1fc8f0cd002ebf4ee74f97cd953e898c4270f10f2a5328.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e04e94ca3f8890fe8e91113d7186f37585bdffd0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/46dec341162765728e1fc8f0cd002ebf4ee74f97cd953e898c4270f10f2a5328.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8eca3a1a91e50e1407c054567267b8936312c38ca68c5574986946617b949691 +size 16627 diff --git a/data/2025/2504_05xxx/2504.05979/images/4702bc6682345e1ecd0d870da7f607f3851d574f86f2af27b69b7ce8feb8d7b3.jpg b/data/2025/2504_05xxx/2504.05979/images/4702bc6682345e1ecd0d870da7f607f3851d574f86f2af27b69b7ce8feb8d7b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e1e2a70bc861b2f28a716b5fd944be56aad4009 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/4702bc6682345e1ecd0d870da7f607f3851d574f86f2af27b69b7ce8feb8d7b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19beb1973402513ca336305bc9fface746b89c2fc328920f52c22a4835594075 +size 5099 diff --git a/data/2025/2504_05xxx/2504.05979/images/470abd7050918de412b06326cdd51c4cece55e61b2888569447852d1b87f591d.jpg b/data/2025/2504_05xxx/2504.05979/images/470abd7050918de412b06326cdd51c4cece55e61b2888569447852d1b87f591d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f11b75c1b10157dc533a18dae088401d169ab4f7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/470abd7050918de412b06326cdd51c4cece55e61b2888569447852d1b87f591d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:885c9db4e88176ce91cce4e773a7acfbb0ae5cfd728aa5f6b9f6a019492be09a +size 14494 diff --git a/data/2025/2504_05xxx/2504.05979/images/4717d46544cd52b4e89e284f9ced1f798ee955d4101bcead34498865073ada80.jpg b/data/2025/2504_05xxx/2504.05979/images/4717d46544cd52b4e89e284f9ced1f798ee955d4101bcead34498865073ada80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9198ab9d6ab8c373ddfea37735cbe6305b586069 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4717d46544cd52b4e89e284f9ced1f798ee955d4101bcead34498865073ada80.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e07acd8e4d1f8313de491dd5b0bfdf424dccc7970dde0f44c354bbd1603974a2 +size 10833 diff --git a/data/2025/2504_05xxx/2504.05979/images/47297a815dfe05f13941cb00e869c4a19a92d4533649e2a583bafd94faf893c9.jpg b/data/2025/2504_05xxx/2504.05979/images/47297a815dfe05f13941cb00e869c4a19a92d4533649e2a583bafd94faf893c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e2158547e11e3b816d7bedf2fcdfe377a13e21b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/47297a815dfe05f13941cb00e869c4a19a92d4533649e2a583bafd94faf893c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7d079991325f1ac9376935be7d27cb88a41dc4eda323bca2ef54e1b91477c4c +size 17486 diff --git a/data/2025/2504_05xxx/2504.05979/images/473d5c4fa40852da1be956bd91fc8dbb01033d977f73e000514033c57935eebb.jpg b/data/2025/2504_05xxx/2504.05979/images/473d5c4fa40852da1be956bd91fc8dbb01033d977f73e000514033c57935eebb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c9cd6f123b275634b3a9056b2a1d66c01516949 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/473d5c4fa40852da1be956bd91fc8dbb01033d977f73e000514033c57935eebb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83d4455361602ed19f13ccaa1c8bc29146b87df33a081fb0fc9f884b84ab6eac +size 20270 diff --git a/data/2025/2504_05xxx/2504.05979/images/475d909f8bd94a4e962a1af7f117f1db9f76e1f2cd7047ccbdf158cec0cbc2b2.jpg b/data/2025/2504_05xxx/2504.05979/images/475d909f8bd94a4e962a1af7f117f1db9f76e1f2cd7047ccbdf158cec0cbc2b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e39fdc92482ae42f73e3c9bb80b7ed7effabd81c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/475d909f8bd94a4e962a1af7f117f1db9f76e1f2cd7047ccbdf158cec0cbc2b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51872002194f76c57541dd500bab85b43f498b03f61f996be6c550f45a56007b +size 11829 diff --git a/data/2025/2504_05xxx/2504.05979/images/47c0aa44b5c1e668ed4e267b28409511fe44987d1f237b43ee033cba301e51ee.jpg 
b/data/2025/2504_05xxx/2504.05979/images/47c0aa44b5c1e668ed4e267b28409511fe44987d1f237b43ee033cba301e51ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd1fa97361aaa876071276caaa9d4aca03c102b4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/47c0aa44b5c1e668ed4e267b28409511fe44987d1f237b43ee033cba301e51ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c28c3c10aa1345ec86e096ea3cfe9ccc938802f1812f88eb7264dc16d84d3488 +size 9374 diff --git a/data/2025/2504_05xxx/2504.05979/images/484e31e49c1e7a2ebd3c367ae0db8fcd10716584cec8c8e06beaca0f6c7a0381.jpg b/data/2025/2504_05xxx/2504.05979/images/484e31e49c1e7a2ebd3c367ae0db8fcd10716584cec8c8e06beaca0f6c7a0381.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d6bb2b9d836ed384d42cbce92b044e03a9ffc4e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/484e31e49c1e7a2ebd3c367ae0db8fcd10716584cec8c8e06beaca0f6c7a0381.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51f6ab3c8bdbb2168745eaa53feabc564054dac8ae091b8d8892292c339f085f +size 18687 diff --git a/data/2025/2504_05xxx/2504.05979/images/4851ec2102693629a48d4c59a074117a859cc53c4e7a1a739b2b4623d6768712.jpg b/data/2025/2504_05xxx/2504.05979/images/4851ec2102693629a48d4c59a074117a859cc53c4e7a1a739b2b4623d6768712.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50ac20fd236344782efae4e66c528457fb2aa0d5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4851ec2102693629a48d4c59a074117a859cc53c4e7a1a739b2b4623d6768712.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6531fd5392f74133068339cf0ce4a7b50177fa4619b8d0ae5e20a04319ad264c +size 4817 diff --git a/data/2025/2504_05xxx/2504.05979/images/48601f43a53bfc38e48cf7cca862cb1f50808dd99f5c4af851716147ddce8e55.jpg b/data/2025/2504_05xxx/2504.05979/images/48601f43a53bfc38e48cf7cca862cb1f50808dd99f5c4af851716147ddce8e55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..da4b2f2b385dc5ff7ead8345ed96b208fbfad753 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/48601f43a53bfc38e48cf7cca862cb1f50808dd99f5c4af851716147ddce8e55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b542a869012ce9cc4306a27c4fb11db9843e01586c395b6f05ed892781d63fb +size 3359 diff --git a/data/2025/2504_05xxx/2504.05979/images/48b98ec70c96ab029b0a162958ffc7baf258a979ab48209783328fd17cc0c608.jpg b/data/2025/2504_05xxx/2504.05979/images/48b98ec70c96ab029b0a162958ffc7baf258a979ab48209783328fd17cc0c608.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8988bf98d63fe3926f90e79036685591a13e48c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/48b98ec70c96ab029b0a162958ffc7baf258a979ab48209783328fd17cc0c608.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9531a466b96ace1530edceda7c25246ca39c4347018a51ce1262626cd1acd44d +size 11409 diff --git a/data/2025/2504_05xxx/2504.05979/images/49309cffd79e60cb0270f525f65a35ec7a1dda136d91a8690b5a4235fa3ad428.jpg b/data/2025/2504_05xxx/2504.05979/images/49309cffd79e60cb0270f525f65a35ec7a1dda136d91a8690b5a4235fa3ad428.jpg new file mode 100644 index 0000000000000000000000000000000000000000..362ef861cca336e024b38f9d99495c8e5fd36e6f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/49309cffd79e60cb0270f525f65a35ec7a1dda136d91a8690b5a4235fa3ad428.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:dce0e02b23886116d57b94ba75d96d212f9e852d790e841314731188fc13d384 +size 11413 diff --git a/data/2025/2504_05xxx/2504.05979/images/494f4bc560feb0a0c69c549d9e94d552cb214aa34f2e532c73c99c615ff3a691.jpg b/data/2025/2504_05xxx/2504.05979/images/494f4bc560feb0a0c69c549d9e94d552cb214aa34f2e532c73c99c615ff3a691.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eaa00f4e5250e0ee380606b84adda4de6d0de808 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/494f4bc560feb0a0c69c549d9e94d552cb214aa34f2e532c73c99c615ff3a691.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd8080d12b46809857ae3b35cfd5b2571d43d4579dbbf2b269b84964b9b922d4 +size 11458 diff --git a/data/2025/2504_05xxx/2504.05979/images/4973b35320e8a2b303648fca1abc0044f2a37f44f7b47f898eb02a0074a2a7ba.jpg b/data/2025/2504_05xxx/2504.05979/images/4973b35320e8a2b303648fca1abc0044f2a37f44f7b47f898eb02a0074a2a7ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd045e5191dd0fa031ee18010d2dc883008cf697 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4973b35320e8a2b303648fca1abc0044f2a37f44f7b47f898eb02a0074a2a7ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9a32252195e9ea846209f6952841c5d4be679e34e41f34fd12c3f382da5262f +size 6657 diff --git a/data/2025/2504_05xxx/2504.05979/images/49f93c593df590166d47657e6c0eda2ed7ef62cdd094e685de672efaec343681.jpg b/data/2025/2504_05xxx/2504.05979/images/49f93c593df590166d47657e6c0eda2ed7ef62cdd094e685de672efaec343681.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6c7156b46eebce9f126fbd86176fc010f9f79b0d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/49f93c593df590166d47657e6c0eda2ed7ef62cdd094e685de672efaec343681.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:664ea0dcca8fa4e1ecd7bd17b3b1f8a0e7bcf725e1ff2322f0af09af99c1e57c +size 19890 diff --git a/data/2025/2504_05xxx/2504.05979/images/4a7dfd47b3b4f45d4ea17721125d0fe632c1243e9f989c4ef96f7157ca1eac3f.jpg b/data/2025/2504_05xxx/2504.05979/images/4a7dfd47b3b4f45d4ea17721125d0fe632c1243e9f989c4ef96f7157ca1eac3f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3cdcbde98889aa49cfae97fa99504935851e0a89 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4a7dfd47b3b4f45d4ea17721125d0fe632c1243e9f989c4ef96f7157ca1eac3f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c2a7246e08f740f772e3c9475c80040cf1fd862a6d47dfd0cbbcfe946244bfc +size 8927 diff --git a/data/2025/2504_05xxx/2504.05979/images/4ae43763ef324638250f7125268cf0a0cd1fadff5496f7eae1707c4aaad3ee1c.jpg b/data/2025/2504_05xxx/2504.05979/images/4ae43763ef324638250f7125268cf0a0cd1fadff5496f7eae1707c4aaad3ee1c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8015f73a8b6a4eb6131a0bda07ad63f9dbeaf7b0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4ae43763ef324638250f7125268cf0a0cd1fadff5496f7eae1707c4aaad3ee1c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e93321ac60a479cb983c82b2b30e7d80de0a8b0b58be245cb12d046fd9fc1d77 +size 11072 diff --git a/data/2025/2504_05xxx/2504.05979/images/4ae472a9c88dd869cf1cee61a0638cf7a434e0355bfd8577ac37c979ef8d008a.jpg b/data/2025/2504_05xxx/2504.05979/images/4ae472a9c88dd869cf1cee61a0638cf7a434e0355bfd8577ac37c979ef8d008a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae97dbe39a1e5e8fe8463fc0e46978e59138caa6 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/4ae472a9c88dd869cf1cee61a0638cf7a434e0355bfd8577ac37c979ef8d008a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bd7fd9707db6a4a12db70ab80585f217e19fcf9d98233bfe401571ea4e8d3f9 +size 5508 diff --git a/data/2025/2504_05xxx/2504.05979/images/4b00229ed17a9f68b0777d94b5bc1f08385f38b87d3759265475e7b14d5c6df0.jpg b/data/2025/2504_05xxx/2504.05979/images/4b00229ed17a9f68b0777d94b5bc1f08385f38b87d3759265475e7b14d5c6df0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a242b60c210604981eb11376a01e97634b60244d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4b00229ed17a9f68b0777d94b5bc1f08385f38b87d3759265475e7b14d5c6df0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34dbef8add5ba1a9212dc364a418dea6783ee21489e8df2f08074ca77055afdf +size 12739 diff --git a/data/2025/2504_05xxx/2504.05979/images/4b28cc6dcc84678e3173e8fa682a9dc064ea0f9777f5f93daeba4307f5cb80ec.jpg b/data/2025/2504_05xxx/2504.05979/images/4b28cc6dcc84678e3173e8fa682a9dc064ea0f9777f5f93daeba4307f5cb80ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..973ab3a31502794a2233037494033b446e2b8c53 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4b28cc6dcc84678e3173e8fa682a9dc064ea0f9777f5f93daeba4307f5cb80ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb39c1a1a765c5d2949bc8eb1a81eab2570bea0f08f6037b6b64e50c057a31d7 +size 9923 diff --git a/data/2025/2504_05xxx/2504.05979/images/4b7ed5536998e9b32588208e95ba79958f7565c5f1a32016788821d35ecd191a.jpg b/data/2025/2504_05xxx/2504.05979/images/4b7ed5536998e9b32588208e95ba79958f7565c5f1a32016788821d35ecd191a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62534cb886737ec8a0264b82c78db9136c113cd6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4b7ed5536998e9b32588208e95ba79958f7565c5f1a32016788821d35ecd191a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cf4c254258dd8cd897f4608e5cf534c45e14e23e75d5b1577cc171ce33c45a6 +size 14567 diff --git a/data/2025/2504_05xxx/2504.05979/images/4ba3110052c7723a701e2ccf5e847ad8c21956fcb34a978c1e55cce340d0131c.jpg b/data/2025/2504_05xxx/2504.05979/images/4ba3110052c7723a701e2ccf5e847ad8c21956fcb34a978c1e55cce340d0131c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d13e3c025617bcbc180eae355817493b14b1e1a2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4ba3110052c7723a701e2ccf5e847ad8c21956fcb34a978c1e55cce340d0131c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88abb4dd3afba46bdf08de7890ad5a29d9aa49ad5177663b45736fa03f3e22fb +size 7845 diff --git a/data/2025/2504_05xxx/2504.05979/images/4bad73d42a2bdd07f8a75924ab1f205dab5f3e1919f244ff0bab551eb3c0c55d.jpg b/data/2025/2504_05xxx/2504.05979/images/4bad73d42a2bdd07f8a75924ab1f205dab5f3e1919f244ff0bab551eb3c0c55d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c3f9a3da988f1b6fd7569a16c742ca19ff7c583 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4bad73d42a2bdd07f8a75924ab1f205dab5f3e1919f244ff0bab551eb3c0c55d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f562dc67b0bca1f953ab47e4b3d5cc94c612ef07a16a642eeaf9f63e7d5840ab +size 8702 diff --git a/data/2025/2504_05xxx/2504.05979/images/4bd0fc1b08f65d2b90caeb19644fda48d0c14d56f7a3b6d5b0258145a86adade.jpg 
b/data/2025/2504_05xxx/2504.05979/images/4bd0fc1b08f65d2b90caeb19644fda48d0c14d56f7a3b6d5b0258145a86adade.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3c4bcfb50a813436291a1b905a7fdd4e393ef44 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4bd0fc1b08f65d2b90caeb19644fda48d0c14d56f7a3b6d5b0258145a86adade.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:475a87eededcc443ddcf68e4134d253a9be1d844b0aca7dd35c5b13ada107004 +size 7578 diff --git a/data/2025/2504_05xxx/2504.05979/images/4bdca3b7ab2d59c6d6c1458affcfac976bc7751cd6959565ea93ddf768f48481.jpg b/data/2025/2504_05xxx/2504.05979/images/4bdca3b7ab2d59c6d6c1458affcfac976bc7751cd6959565ea93ddf768f48481.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5e40fa4e033aa339f7c18f9d78ff266aebaa797 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4bdca3b7ab2d59c6d6c1458affcfac976bc7751cd6959565ea93ddf768f48481.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cbb10ebedf707d052d2ad44858403565dc7ffd333dfeb453a67e24334de30a5 +size 11146 diff --git a/data/2025/2504_05xxx/2504.05979/images/4c521d5dabb23441105ce25f3e4ea4fe5cdfd75cbe5ba899f0fa357d7cbf73e0.jpg b/data/2025/2504_05xxx/2504.05979/images/4c521d5dabb23441105ce25f3e4ea4fe5cdfd75cbe5ba899f0fa357d7cbf73e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ec21f83fff9a4b48483614de94080e0355e1537 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4c521d5dabb23441105ce25f3e4ea4fe5cdfd75cbe5ba899f0fa357d7cbf73e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e02151dfadd896c57e26d5bdefd50a3681b172182791a594ab63cfc4f7eced88 +size 12116 diff --git a/data/2025/2504_05xxx/2504.05979/images/4c59a95a5b0ba1c9ed0c407b8724156fd81dd1c21744e6e8075190bae52e10fd.jpg b/data/2025/2504_05xxx/2504.05979/images/4c59a95a5b0ba1c9ed0c407b8724156fd81dd1c21744e6e8075190bae52e10fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b5b3404c5d8b17407c4e12db985fb30dee0ff5e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4c59a95a5b0ba1c9ed0c407b8724156fd81dd1c21744e6e8075190bae52e10fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d1c6b63c62d34a91474ad5941a594946411593eec1c1ca72724db875f5c0d54 +size 10039 diff --git a/data/2025/2504_05xxx/2504.05979/images/4cb8d78d25db9472f21ac15da10a306c42418c98f395c4de73dab05c66763de4.jpg b/data/2025/2504_05xxx/2504.05979/images/4cb8d78d25db9472f21ac15da10a306c42418c98f395c4de73dab05c66763de4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8c20bea096b3b6dcaaadc1d3e8c88a11a5f1586 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4cb8d78d25db9472f21ac15da10a306c42418c98f395c4de73dab05c66763de4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cc77b155ec442a94a1d829a07c68eb3c8c99e531909f3a22aadd7b2a1928fa7 +size 9680 diff --git a/data/2025/2504_05xxx/2504.05979/images/4cefd3a472618bf4ed1b900b8f0253d8ca072ad60e16e238d601280a1ff4ac20.jpg b/data/2025/2504_05xxx/2504.05979/images/4cefd3a472618bf4ed1b900b8f0253d8ca072ad60e16e238d601280a1ff4ac20.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bde19006b08e0771b24d9548d033dcbb65da4fa3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4cefd3a472618bf4ed1b900b8f0253d8ca072ad60e16e238d601280a1ff4ac20.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:dfd49d7758abc3b803271517ce777e88315a84c7c31eec13d0de653fb0290f50 +size 5801 diff --git a/data/2025/2504_05xxx/2504.05979/images/4d7cf29e3244126ed31062342f32c73674817be1d0079f2da561d4e75f3368e2.jpg b/data/2025/2504_05xxx/2504.05979/images/4d7cf29e3244126ed31062342f32c73674817be1d0079f2da561d4e75f3368e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f9ed61d3b3f21afb16b6f97cfbcaf829dfeab3e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4d7cf29e3244126ed31062342f32c73674817be1d0079f2da561d4e75f3368e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:859beb6828a28067f9d9aa21207ac34d0086c6d8c093a9af4c044d2f7b5aadfa +size 22424 diff --git a/data/2025/2504_05xxx/2504.05979/images/4d9a72541da86687fce106cdee64c185f7126d2f2e035a9623361561c2006136.jpg b/data/2025/2504_05xxx/2504.05979/images/4d9a72541da86687fce106cdee64c185f7126d2f2e035a9623361561c2006136.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b409310d91fbc7e458764c05848bc17ec74adc14 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4d9a72541da86687fce106cdee64c185f7126d2f2e035a9623361561c2006136.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:479e4d5962cec03731e2da93a05a7681826138e0d71419b364e0a59d7592f19c +size 10669 diff --git a/data/2025/2504_05xxx/2504.05979/images/4dbac1245c6ad15005db4a1d3ccb5720d30e6be34a344f45e008b6f608fc4cb6.jpg b/data/2025/2504_05xxx/2504.05979/images/4dbac1245c6ad15005db4a1d3ccb5720d30e6be34a344f45e008b6f608fc4cb6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..12ffe8da58429b46f2edc70de38d73d9824f5f69 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4dbac1245c6ad15005db4a1d3ccb5720d30e6be34a344f45e008b6f608fc4cb6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:353e954932ef13ac0f0f81b167bd39587ff806e36642d028c0abcb71a2c4f5df +size 8348 diff --git a/data/2025/2504_05xxx/2504.05979/images/4e2bd472765d312f5f2fa59f23f64b308cadfa60ddeaef7c74ca0effcf81c1de.jpg b/data/2025/2504_05xxx/2504.05979/images/4e2bd472765d312f5f2fa59f23f64b308cadfa60ddeaef7c74ca0effcf81c1de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b9f043ef93d462a41001802463a017af78982033 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4e2bd472765d312f5f2fa59f23f64b308cadfa60ddeaef7c74ca0effcf81c1de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73346247a1f64cfbeb42b0c77b3da0f5515aa3dab2961379b07f35bc192d0b57 +size 8935 diff --git a/data/2025/2504_05xxx/2504.05979/images/4ef72f42ea0ddf4ee1ec0367601e96f40b0391ec4c9fa27f967f8b50716f6305.jpg b/data/2025/2504_05xxx/2504.05979/images/4ef72f42ea0ddf4ee1ec0367601e96f40b0391ec4c9fa27f967f8b50716f6305.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6da467866b3e0f327ab457a20d9e36a7032e7bf --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/4ef72f42ea0ddf4ee1ec0367601e96f40b0391ec4c9fa27f967f8b50716f6305.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f4122b796946f556bdf6202a39b174195180ad29e375750c1736b0292ac5713 +size 973 diff --git a/data/2025/2504_05xxx/2504.05979/images/4f0a2ccea09917b8073951d104a30402dee27f0a79f45727ae62f4aca6252dac.jpg b/data/2025/2504_05xxx/2504.05979/images/4f0a2ccea09917b8073951d104a30402dee27f0a79f45727ae62f4aca6252dac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bc401244e94db1e9b789457763e56fa5c755bf3 --- /dev/null +++ 
diff --git a/data/2025/2504_05xxx/2504.05979/images/4f2a82196d91323ac6987c501fe192b87a37b8c7bd49cfefc255b54f649c96f1.jpg b/data/2025/2504_05xxx/2504.05979/images/4f2a82196d91323ac6987c501fe192b87a37b8c7bd49cfefc255b54f649c96f1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..522b7ba0871560173ee459b1a155102e0b584cea
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/4f2a82196d91323ac6987c501fe192b87a37b8c7bd49cfefc255b54f649c96f1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c669a6a01a508a13ce2b4c347a3c4762d0a6ca51963601cc445c36c76bad8ee
+size 977
diff --git a/data/2025/2504_05xxx/2504.05979/images/4f4a54c95a2c61f0b917046158f22c0ac2fb4eaa7c0013af0028c8b141cd8788.jpg b/data/2025/2504_05xxx/2504.05979/images/4f4a54c95a2c61f0b917046158f22c0ac2fb4eaa7c0013af0028c8b141cd8788.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b5e58ea70956a38415b8f6f04093d0d3213d56dd
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/4f4a54c95a2c61f0b917046158f22c0ac2fb4eaa7c0013af0028c8b141cd8788.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68899c7a72a13d64db478369d8e1a78b8dccb877f1d49fd90b4f7688c141ace0
+size 8655
diff --git a/data/2025/2504_05xxx/2504.05979/images/4f5025e7e13970ffe6ae4d344132af9a438277305e7345db9006ce618d0573fb.jpg b/data/2025/2504_05xxx/2504.05979/images/4f5025e7e13970ffe6ae4d344132af9a438277305e7345db9006ce618d0573fb.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a60c219d1cfdee59b75109ebfcc1e976b3b97707
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/4f5025e7e13970ffe6ae4d344132af9a438277305e7345db9006ce618d0573fb.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c77ccdc54e123a1cbc9fc55659c7162a43618a9cf6019709fab3f58a4055ad6c
+size 7738
diff --git a/data/2025/2504_05xxx/2504.05979/images/4f6fc9b2404193b922604fdb04aa7ba553fa2808adea7ae850000fdbcf1459d2.jpg b/data/2025/2504_05xxx/2504.05979/images/4f6fc9b2404193b922604fdb04aa7ba553fa2808adea7ae850000fdbcf1459d2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..333450adb39e1d2173aab5887f5c9b54a55492e1
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/4f6fc9b2404193b922604fdb04aa7ba553fa2808adea7ae850000fdbcf1459d2.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfe1fd953a113c89f213b5c0c691cbe5cb3297c66703a057ca66c05a171bd7f5
+size 16584
diff --git a/data/2025/2504_05xxx/2504.05979/images/4f7cd151bdab47a69f14cb0fefb2b225b07676d7de97906f3f09e608ae362d1f.jpg b/data/2025/2504_05xxx/2504.05979/images/4f7cd151bdab47a69f14cb0fefb2b225b07676d7de97906f3f09e608ae362d1f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e94fb954398b40438e2926d5fc893bb46160d537
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/4f7cd151bdab47a69f14cb0fefb2b225b07676d7de97906f3f09e608ae362d1f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae34f80dfa7e9cde881887aa6a996e7674583581bee2d2a4c331a4e61381ca0a
+size 10197
diff --git a/data/2025/2504_05xxx/2504.05979/images/4f7ea6846a9f2e71ede47f66f8045aa234b77d2155c2f56eca68f95514f68e80.jpg b/data/2025/2504_05xxx/2504.05979/images/4f7ea6846a9f2e71ede47f66f8045aa234b77d2155c2f56eca68f95514f68e80.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a8a78ee44b74b9d8a462405cb6f92e03fd7e6b92
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/4f7ea6846a9f2e71ede47f66f8045aa234b77d2155c2f56eca68f95514f68e80.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa7c1c66a19df4d93d7dbac3eb7d26afdd0ecf1184bebb02a74e6e142f72fc09
+size 11799
diff --git a/data/2025/2504_05xxx/2504.05979/images/4f92cfa09792f7a936729795c4faf6f6742488baef7a2394db6b9a5dc3b73e04.jpg b/data/2025/2504_05xxx/2504.05979/images/4f92cfa09792f7a936729795c4faf6f6742488baef7a2394db6b9a5dc3b73e04.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5f3530c1a9abfa7e19f95163e5125db6a1951b66
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/4f92cfa09792f7a936729795c4faf6f6742488baef7a2394db6b9a5dc3b73e04.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e684d7441fe304b2c98d30adbf1922575aa7ccd3dc938d29d8c12a99c43e8a6f
+size 10234
diff --git a/data/2025/2504_05xxx/2504.05979/images/4fed5bdfe6b3d24177ff8f96422315e81b3ae864616ce0f21770566bf7fd8891.jpg b/data/2025/2504_05xxx/2504.05979/images/4fed5bdfe6b3d24177ff8f96422315e81b3ae864616ce0f21770566bf7fd8891.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1cd2f266f013b4805b43d5de6aa41733a6a1e9b9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/4fed5bdfe6b3d24177ff8f96422315e81b3ae864616ce0f21770566bf7fd8891.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62e41ba37694168cb0ea3d041d2d2cc0dcde20007e02da6fa960f1ed067e3198
+size 20694
diff --git a/data/2025/2504_05xxx/2504.05979/images/505a6db0b4602aea76566f436b70be7da6453677d5c873c5267b35cc43b24a51.jpg b/data/2025/2504_05xxx/2504.05979/images/505a6db0b4602aea76566f436b70be7da6453677d5c873c5267b35cc43b24a51.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d6210b374740134bf5f4d4a6bfd0ffa7e4599356
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/505a6db0b4602aea76566f436b70be7da6453677d5c873c5267b35cc43b24a51.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0fcbdcfb265a4ec463c0113ffa80a53e099cdbc932348198631f89ac9e2adae
+size 2001
diff --git a/data/2025/2504_05xxx/2504.05979/images/509dd28767c58df8dc35fefb0ee74154d0b9a3aadcb516526f252eee3fbd72ef.jpg b/data/2025/2504_05xxx/2504.05979/images/509dd28767c58df8dc35fefb0ee74154d0b9a3aadcb516526f252eee3fbd72ef.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..337f298c7ba226483768e73bbdefda38016ba7a0
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/509dd28767c58df8dc35fefb0ee74154d0b9a3aadcb516526f252eee3fbd72ef.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb767a15a986212ff76d8ec89ae44b1b7f2ec97d7eb7547a6be37aa5dc5efa7a
+size 26562
diff --git a/data/2025/2504_05xxx/2504.05979/images/50de3ca29f0a98439974b500ee80d55a3e4acb3f252738f8682accecf3be15e0.jpg b/data/2025/2504_05xxx/2504.05979/images/50de3ca29f0a98439974b500ee80d55a3e4acb3f252738f8682accecf3be15e0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6955e620a1de85c5d4b53229dcc28028e8302e0d
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/50de3ca29f0a98439974b500ee80d55a3e4acb3f252738f8682accecf3be15e0.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b094e80269c8358bdb3a2a9eb62ffb64c8ec81189bbf32a0ffbd4a7263e13bf
+size 8967
diff --git a/data/2025/2504_05xxx/2504.05979/images/50e1bcc4270a236adf25775994fc41829c4d6c762fc5cef6ca4d34dd425a43e5.jpg b/data/2025/2504_05xxx/2504.05979/images/50e1bcc4270a236adf25775994fc41829c4d6c762fc5cef6ca4d34dd425a43e5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8c8eb45311e1c51f8a52365e3eaeeaf6d05338c4
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/50e1bcc4270a236adf25775994fc41829c4d6c762fc5cef6ca4d34dd425a43e5.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90e0a8fb439ef75e80015eba492bcac241b62759fa4a743f4b6dbf9fc8a8c16a
+size 8198
diff --git a/data/2025/2504_05xxx/2504.05979/images/50ee1a066627ca3b776f328349447b722e1083477451cb97c6b6bfb2c2054b04.jpg b/data/2025/2504_05xxx/2504.05979/images/50ee1a066627ca3b776f328349447b722e1083477451cb97c6b6bfb2c2054b04.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3bae6e7c15c6f3e4af53541c27d8c48541b722b5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/50ee1a066627ca3b776f328349447b722e1083477451cb97c6b6bfb2c2054b04.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73f952876f6511c6ed39e31b9956df8f0a5759de07f21b6f4ea549b0a97881e7
+size 15166
diff --git a/data/2025/2504_05xxx/2504.05979/images/50fb798f21796fa55105c09f2e99d39e009e8f0617af0664a4836dbe70e314d0.jpg b/data/2025/2504_05xxx/2504.05979/images/50fb798f21796fa55105c09f2e99d39e009e8f0617af0664a4836dbe70e314d0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bc4716f8f9747a4d3bb98a82300839f9c48b75fa
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/50fb798f21796fa55105c09f2e99d39e009e8f0617af0664a4836dbe70e314d0.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4854e60a635953bae8bbe97f8d74d913addd7eeda24e3041b900557a063b3dcf
+size 9632
diff --git a/data/2025/2504_05xxx/2504.05979/images/517e9f2a30d20e6e67d1e7cc8d3275c188d4ba9fe1e3f526c5ea127a17812855.jpg b/data/2025/2504_05xxx/2504.05979/images/517e9f2a30d20e6e67d1e7cc8d3275c188d4ba9fe1e3f526c5ea127a17812855.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..aacffc61609c130cac5aea8baaaa30f5b5a08d68
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/517e9f2a30d20e6e67d1e7cc8d3275c188d4ba9fe1e3f526c5ea127a17812855.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c80200aa9b88d5f62d62e477b2169357366f2905189645139388cc96722431a0
+size 13974
diff --git a/data/2025/2504_05xxx/2504.05979/images/51b49e12056077fe1afb554b56beba3c707955dbdefb32d0bdf28a7d9683ab6a.jpg b/data/2025/2504_05xxx/2504.05979/images/51b49e12056077fe1afb554b56beba3c707955dbdefb32d0bdf28a7d9683ab6a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..93cafee3a838c5bafee395786d0e5cc2a812c931
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/51b49e12056077fe1afb554b56beba3c707955dbdefb32d0bdf28a7d9683ab6a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca89cbd00dfaa58fa6424df9e7b140149d617dc3cdf0edee80b926963e4e336a
+size 14539
diff --git a/data/2025/2504_05xxx/2504.05979/images/51cc0442254dbc7d3906e354e679e7c97663cf26955df12ad376ac2c249891a4.jpg b/data/2025/2504_05xxx/2504.05979/images/51cc0442254dbc7d3906e354e679e7c97663cf26955df12ad376ac2c249891a4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..561a9186fc8680720ef6d097915f30305bf16ede
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/51cc0442254dbc7d3906e354e679e7c97663cf26955df12ad376ac2c249891a4.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee4f86f637300a66ef695e6bcbe96e2abd614ab3392dfd9996b8538147a2e69e
+size 14160
diff --git a/data/2025/2504_05xxx/2504.05979/images/526a547e833ad8f409b85457a4b260fbfa64463b2a497a191035f2bdb24ac6b7.jpg b/data/2025/2504_05xxx/2504.05979/images/526a547e833ad8f409b85457a4b260fbfa64463b2a497a191035f2bdb24ac6b7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..730277df6fca7686f181d7d600aa282fe38d4677
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/526a547e833ad8f409b85457a4b260fbfa64463b2a497a191035f2bdb24ac6b7.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b113ee2dadb797f56181172e3492cff1c8e1631be2847c35d22539cdd40d692
+size 4955
diff --git a/data/2025/2504_05xxx/2504.05979/images/530cb7230b747fa4e841052b66f6154fe575e082392dc9ebee75fb61c0ee6728.jpg b/data/2025/2504_05xxx/2504.05979/images/530cb7230b747fa4e841052b66f6154fe575e082392dc9ebee75fb61c0ee6728.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f266292b9afd528a0c4facec2eb156c9208ca99c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/530cb7230b747fa4e841052b66f6154fe575e082392dc9ebee75fb61c0ee6728.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18d9a02c31ff2004a562b5fe0ae15ca401b1368b8feff43c9b885b85c4363081
+size 10534
diff --git a/data/2025/2504_05xxx/2504.05979/images/536066035c978487014a28aff6357f8949e5085087a1135fefd97bd330b10ad5.jpg b/data/2025/2504_05xxx/2504.05979/images/536066035c978487014a28aff6357f8949e5085087a1135fefd97bd330b10ad5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b31a7f5aaedf585761a2e0811a49501964605249
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/536066035c978487014a28aff6357f8949e5085087a1135fefd97bd330b10ad5.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:267e86837bc919ae178af2f338036c798d91a6ae11e8ed84d4421e830f188440
+size 8578
diff --git a/data/2025/2504_05xxx/2504.05979/images/536e0b422f449656c91454149f3446fde812ec0f995fcf5bbe9e0a43e2d597fa.jpg b/data/2025/2504_05xxx/2504.05979/images/536e0b422f449656c91454149f3446fde812ec0f995fcf5bbe9e0a43e2d597fa.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..df2798a322fe2cc29f6ef5efbb939ebce4863850
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/536e0b422f449656c91454149f3446fde812ec0f995fcf5bbe9e0a43e2d597fa.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f088a4937e5c4356c14966bebaa91819d0218f3e0aabb0ab5e7b647a824075c9
+size 12090
diff --git a/data/2025/2504_05xxx/2504.05979/images/536ebeb2091f4d9765dfff67b32c88a8b7e2f773ca113d0bf1fe677432f050ca.jpg b/data/2025/2504_05xxx/2504.05979/images/536ebeb2091f4d9765dfff67b32c88a8b7e2f773ca113d0bf1fe677432f050ca.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..56b82d65f50c5f2e8630b15167bc3e7b4a94ca8f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/536ebeb2091f4d9765dfff67b32c88a8b7e2f773ca113d0bf1fe677432f050ca.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c3eafcb1ab61ff96a62be5344e72f7e7adfa55b9fb0bfbd8f589714f12bcbc3
+size 13724
diff --git a/data/2025/2504_05xxx/2504.05979/images/53b1fed05e6b1e6eefadf734ccb916b0779e1b0b4d17f572e0bed7be77130d6c.jpg b/data/2025/2504_05xxx/2504.05979/images/53b1fed05e6b1e6eefadf734ccb916b0779e1b0b4d17f572e0bed7be77130d6c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..db3700736505d9ce8b25d9f206fd3f01ddf871e5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/53b1fed05e6b1e6eefadf734ccb916b0779e1b0b4d17f572e0bed7be77130d6c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44d582213b2fffb6a3d7e63e98fbb8c4b484180287e332e9f4da8f3c4677c203
+size 15380
diff --git a/data/2025/2504_05xxx/2504.05979/images/53c762f53aba99798ebe7781772fd8e13f36de30319f71548adb22664211cf14.jpg b/data/2025/2504_05xxx/2504.05979/images/53c762f53aba99798ebe7781772fd8e13f36de30319f71548adb22664211cf14.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..efa97dd32aea2edaa996d8fb9b0a303ae5746c5e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/53c762f53aba99798ebe7781772fd8e13f36de30319f71548adb22664211cf14.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab568454a758fa2d7e3922064a8804669ba9e037f5740c76f7780898e69f71e8
+size 4957
diff --git a/data/2025/2504_05xxx/2504.05979/images/541421ed5de9f82a9202335ca7678323b87627971bf9397ec165d6899894a416.jpg b/data/2025/2504_05xxx/2504.05979/images/541421ed5de9f82a9202335ca7678323b87627971bf9397ec165d6899894a416.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4da6539d54471a9db9a4b36d3cb957601d623d29
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/541421ed5de9f82a9202335ca7678323b87627971bf9397ec165d6899894a416.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ac5b05f9e37be3edc0f3dd39af17e2fcc81ddc62cfea86e31c5683bf500daab
+size 992
diff --git a/data/2025/2504_05xxx/2504.05979/images/5447a1f82a340e52710bd0c108dac73285aab065976ccc90d3243a192c03666f.jpg b/data/2025/2504_05xxx/2504.05979/images/5447a1f82a340e52710bd0c108dac73285aab065976ccc90d3243a192c03666f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a380fddabb03d2e8d4c5d503293a3d5a3ab19204
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5447a1f82a340e52710bd0c108dac73285aab065976ccc90d3243a192c03666f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c2d572512a683d4a19b5d2ad194f308b7d83f8af309384520849c0ba4f22798
+size 11012
diff --git a/data/2025/2504_05xxx/2504.05979/images/54845239cccd77ffed346bbbb01a2390dc01dd6649c038184a108f0d78f6e18b.jpg b/data/2025/2504_05xxx/2504.05979/images/54845239cccd77ffed346bbbb01a2390dc01dd6649c038184a108f0d78f6e18b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1d3cc57c7223ba3d98553ff519f495f874b235aa
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/54845239cccd77ffed346bbbb01a2390dc01dd6649c038184a108f0d78f6e18b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14dee9f65ab6894807ce73a6ed25f582a82eb289a089a2da819b796da0b4f040
+size 12615
diff --git a/data/2025/2504_05xxx/2504.05979/images/54e8f1e07b35c7646e6a1992f478ea6768d117a77bc4a4fc4afcd51a7f5a0920.jpg b/data/2025/2504_05xxx/2504.05979/images/54e8f1e07b35c7646e6a1992f478ea6768d117a77bc4a4fc4afcd51a7f5a0920.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5b2406c3128a59302265c9958095c847565c9406
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/54e8f1e07b35c7646e6a1992f478ea6768d117a77bc4a4fc4afcd51a7f5a0920.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d57f70b11da0dafc7cddb2ea10b68c5f8d25ab454826286f33b2e9ec11242c8
+size 10189
diff --git a/data/2025/2504_05xxx/2504.05979/images/55dc8086726bc727505f262393f9f58520886c8655e9a928d6c1c6186955bbfd.jpg b/data/2025/2504_05xxx/2504.05979/images/55dc8086726bc727505f262393f9f58520886c8655e9a928d6c1c6186955bbfd.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ce90d7762ad7b5f9ac1d5c30360a882ab726808a
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/55dc8086726bc727505f262393f9f58520886c8655e9a928d6c1c6186955bbfd.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e973bc3151427672f323efa5d4aef1b6a352b9080be702b41db38aeaa1e82073
+size 13058
diff --git a/data/2025/2504_05xxx/2504.05979/images/55eb18aa82b40bdb5d13022c1ffe68e93f0cf3359c85faa059e9e14d3e5ff8c3.jpg b/data/2025/2504_05xxx/2504.05979/images/55eb18aa82b40bdb5d13022c1ffe68e93f0cf3359c85faa059e9e14d3e5ff8c3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..31190cbd8c71a55bf46137a6d8a7eb3358ab4e02
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/55eb18aa82b40bdb5d13022c1ffe68e93f0cf3359c85faa059e9e14d3e5ff8c3.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1168b6646be20af734f63fa45fc2a0257c36c496a35664fdf56bdd821cf708e9
+size 3234
diff --git a/data/2025/2504_05xxx/2504.05979/images/56f24172af8797ef15310df874ee2c02e4edcdae14624400e6196e1a2638ef82.jpg b/data/2025/2504_05xxx/2504.05979/images/56f24172af8797ef15310df874ee2c02e4edcdae14624400e6196e1a2638ef82.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6fb63429073972e10e23c4552926d6163ab3674f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/56f24172af8797ef15310df874ee2c02e4edcdae14624400e6196e1a2638ef82.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e842a06a48aed2cbf43c58c72a8fd907c609ebf9d8c3d7363eb857e23a75307
+size 15606
diff --git a/data/2025/2504_05xxx/2504.05979/images/572feead6e536b7524eb2f723ff12841c3e537418651f45988dbcb4312766cf2.jpg b/data/2025/2504_05xxx/2504.05979/images/572feead6e536b7524eb2f723ff12841c3e537418651f45988dbcb4312766cf2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..46b81433f300f27e1e1573efa179284deaef88c3
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/572feead6e536b7524eb2f723ff12841c3e537418651f45988dbcb4312766cf2.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:170c1b09a73bd6e2bef2593778b48f4626c4644f52038cf9ef82130e11661868
+size 23640
diff --git a/data/2025/2504_05xxx/2504.05979/images/577ab214cdb8de2d03a7a2be9b878ed69afb604d28c0d21381d8da756093e973.jpg b/data/2025/2504_05xxx/2504.05979/images/577ab214cdb8de2d03a7a2be9b878ed69afb604d28c0d21381d8da756093e973.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c994fd8499025219556f608a4c23a2d5bfaf68e9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/577ab214cdb8de2d03a7a2be9b878ed69afb604d28c0d21381d8da756093e973.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf3a21e3ebe95e45bba59e0a25f173b1f500aef9b7103e2839975fbbbf2d05e8
+size 11651
diff --git a/data/2025/2504_05xxx/2504.05979/images/577dde2d7debd868c1455d80030c181d453b6b7c4530b75d54e42bb0bc034596.jpg b/data/2025/2504_05xxx/2504.05979/images/577dde2d7debd868c1455d80030c181d453b6b7c4530b75d54e42bb0bc034596.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e2c159cf442b48c3ea1429d90d9ccf72f02d2a90
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/577dde2d7debd868c1455d80030c181d453b6b7c4530b75d54e42bb0bc034596.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e7de8250118916e6683e2c51bfb57d4457426b1753fb3b82b5f4b0afaa15637
+size 4483
diff --git a/data/2025/2504_05xxx/2504.05979/images/57830b99a43f06f49ba04d1b2f5ded94ea4df8175531c29f23aca0f89a53b83b.jpg b/data/2025/2504_05xxx/2504.05979/images/57830b99a43f06f49ba04d1b2f5ded94ea4df8175531c29f23aca0f89a53b83b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e81b066d7a6e4c729e709216fb889310ad3e37db
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/57830b99a43f06f49ba04d1b2f5ded94ea4df8175531c29f23aca0f89a53b83b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0709ed69e274d83e4b6f3be3fce0566713c7302b4e1a3e4cac7263adc05418ac
+size 6004
diff --git a/data/2025/2504_05xxx/2504.05979/images/5789a11c88dd67fe3ecf18424e18808df9be15ba558dca4d60893646f7ed02f2.jpg b/data/2025/2504_05xxx/2504.05979/images/5789a11c88dd67fe3ecf18424e18808df9be15ba558dca4d60893646f7ed02f2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..22e9f13077b4e243b254ed426b8a239f9598ae25
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5789a11c88dd67fe3ecf18424e18808df9be15ba558dca4d60893646f7ed02f2.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cac5ddab544500bc125799851829fa39802e6351999ddad916c0e16b47f67ee
+size 5227
diff --git a/data/2025/2504_05xxx/2504.05979/images/58100ddfbfd9cfbbcbe07188ce06d08a6990468de6ef91ce1596ca7e44f0513d.jpg b/data/2025/2504_05xxx/2504.05979/images/58100ddfbfd9cfbbcbe07188ce06d08a6990468de6ef91ce1596ca7e44f0513d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..217c9adf6c84f6afbecc4b76aca42e324969695c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/58100ddfbfd9cfbbcbe07188ce06d08a6990468de6ef91ce1596ca7e44f0513d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30303ba4cf83e487a5aba9984f466ba880295a73d47c9c8ae2c361590234e5f4
+size 13811
diff --git a/data/2025/2504_05xxx/2504.05979/images/582236643d6a53099264228610a5ef869dd1b77a4e837887915e57f35aec63bc.jpg b/data/2025/2504_05xxx/2504.05979/images/582236643d6a53099264228610a5ef869dd1b77a4e837887915e57f35aec63bc.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8fe66ef62403f09a368aa72cc28e82cea2b32026
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/582236643d6a53099264228610a5ef869dd1b77a4e837887915e57f35aec63bc.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3bfeae7654586b44cc1869dc0f3204570d836cd941babe1c37b3f57d42b9b2f
+size 18403
diff --git a/data/2025/2504_05xxx/2504.05979/images/583b65ff347f460c795a71a3cedff21e9e48ec2217a26a74271927b6aa96f5d5.jpg b/data/2025/2504_05xxx/2504.05979/images/583b65ff347f460c795a71a3cedff21e9e48ec2217a26a74271927b6aa96f5d5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e3b58a3d82e5cddb56a9fc218cd9a3ada09dcc9f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/583b65ff347f460c795a71a3cedff21e9e48ec2217a26a74271927b6aa96f5d5.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03685db5f070a71dc4b15927a24147e6c826655b32a30ab85d252e5cc5b8d49b
+size 7360
diff --git a/data/2025/2504_05xxx/2504.05979/images/58dcd2fc462e78a9dc9f7d7d52dd5084aa6aafb0d48039e3f826d0102bca067b.jpg b/data/2025/2504_05xxx/2504.05979/images/58dcd2fc462e78a9dc9f7d7d52dd5084aa6aafb0d48039e3f826d0102bca067b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c4731c201fc42def757607cac4d84c6da706ffd5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/58dcd2fc462e78a9dc9f7d7d52dd5084aa6aafb0d48039e3f826d0102bca067b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9547d4f492f25db9ef7f82f8b8bc80f9089f6cbba37776de86d26cf72d157f0c
+size 11953
diff --git a/data/2025/2504_05xxx/2504.05979/images/58f6525436f8706212ce32abd6aa2f4ba0d58a7cd53654d0f24fecf81b3fb9cb.jpg b/data/2025/2504_05xxx/2504.05979/images/58f6525436f8706212ce32abd6aa2f4ba0d58a7cd53654d0f24fecf81b3fb9cb.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8679fa19d7f23db5f22c39b4aa861f32753b66fd
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/58f6525436f8706212ce32abd6aa2f4ba0d58a7cd53654d0f24fecf81b3fb9cb.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac00ed427cff40d464f24d03fd0afe72533d037df59ff977912c7d8a773d52bd
+size 11350
diff --git a/data/2025/2504_05xxx/2504.05979/images/5930d203da41b02946e5b564261b3a200c305a6a549a4ba7c3961c80697c16ce.jpg b/data/2025/2504_05xxx/2504.05979/images/5930d203da41b02946e5b564261b3a200c305a6a549a4ba7c3961c80697c16ce.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c3b1d46d806129ef404d6390ccce83f8e0f152f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5930d203da41b02946e5b564261b3a200c305a6a549a4ba7c3961c80697c16ce.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:851663bd946643fa1b79563056e0f8e256e60fbc4369eca0de886c1efd21229a
+size 12105
diff --git a/data/2025/2504_05xxx/2504.05979/images/598045ae955aa43906186813c0efdb19e703aeb859e6842ca9badf105529c011.jpg b/data/2025/2504_05xxx/2504.05979/images/598045ae955aa43906186813c0efdb19e703aeb859e6842ca9badf105529c011.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e936d2807fe478532e45a00bc80165fa126201e3
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/598045ae955aa43906186813c0efdb19e703aeb859e6842ca9badf105529c011.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d1e7267edb33c3f02fd79823ddbc3e18bb6185a6cfdac48c70d8e90d6ad2bed
+size 13279
diff --git a/data/2025/2504_05xxx/2504.05979/images/59b7eab0c07c081369aaeb8fc99ee1be38d0f888691576752368cada60c2dfd5.jpg b/data/2025/2504_05xxx/2504.05979/images/59b7eab0c07c081369aaeb8fc99ee1be38d0f888691576752368cada60c2dfd5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..50096e77d99527daa9c5dc586964c9b55d71e016
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/59b7eab0c07c081369aaeb8fc99ee1be38d0f888691576752368cada60c2dfd5.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db1bc7d622880c854be5257781da67002009c9fd9e877105846f9971354ef491
+size 8926
diff --git a/data/2025/2504_05xxx/2504.05979/images/59f2bdbb4c18ff5b2eaf1af8462de2d557b98afb5403da1a76ec1f639e6017a6.jpg b/data/2025/2504_05xxx/2504.05979/images/59f2bdbb4c18ff5b2eaf1af8462de2d557b98afb5403da1a76ec1f639e6017a6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cc1ade0813c94a5835d7e5a4f533064d705a5a06
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/59f2bdbb4c18ff5b2eaf1af8462de2d557b98afb5403da1a76ec1f639e6017a6.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f425c027e23cd1440a89a9692fc318d441dd26e962f3d7f907f754b57ebf04d
+size 16970
diff --git a/data/2025/2504_05xxx/2504.05979/images/5bbe5e94282bd4bc533b7d94b749decb59742f5aee7bcb68887dcce921c8b324.jpg b/data/2025/2504_05xxx/2504.05979/images/5bbe5e94282bd4bc533b7d94b749decb59742f5aee7bcb68887dcce921c8b324.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..825b31f988a63375a0a130f81f5f7b46ec3db65d
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5bbe5e94282bd4bc533b7d94b749decb59742f5aee7bcb68887dcce921c8b324.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a974917f465463d16e2f4fdce34ecd7abdc21f28ee48a3a40059a4ea70b68a9
+size 15333
diff --git a/data/2025/2504_05xxx/2504.05979/images/5bd9acccaac95cfe7fe77ac7db21eb318d54092425bd5f07277c03e7499c903f.jpg b/data/2025/2504_05xxx/2504.05979/images/5bd9acccaac95cfe7fe77ac7db21eb318d54092425bd5f07277c03e7499c903f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..407203e38b918a8b73397a871d67946e6820694c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5bd9acccaac95cfe7fe77ac7db21eb318d54092425bd5f07277c03e7499c903f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3777467b2b47bfaa373a4221082dcae0b17eee916efc14a111219eb48d6f0e41
+size 17352
diff --git a/data/2025/2504_05xxx/2504.05979/images/5d0d22f1be19f77f4671c9695622b0c8c720ea17567c646c8545d5c47f0261dd.jpg b/data/2025/2504_05xxx/2504.05979/images/5d0d22f1be19f77f4671c9695622b0c8c720ea17567c646c8545d5c47f0261dd.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f26f7da80147c0099a741d7be07e0af534e58dbb
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5d0d22f1be19f77f4671c9695622b0c8c720ea17567c646c8545d5c47f0261dd.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92e52f6f98c764872eccd78b925958d2475b3d1fc3a79861a2c79e2868b23c23
+size 10643
diff --git a/data/2025/2504_05xxx/2504.05979/images/5d220158cfe75b892d7a3627f6bbfe90c6327f1ca348ff2386cb8e06bc0bd166.jpg b/data/2025/2504_05xxx/2504.05979/images/5d220158cfe75b892d7a3627f6bbfe90c6327f1ca348ff2386cb8e06bc0bd166.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9fd90f000a43c966e7d44df609f36b422488f90e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5d220158cfe75b892d7a3627f6bbfe90c6327f1ca348ff2386cb8e06bc0bd166.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e59a1089385d87556989e9309719485d4dabecbc4beac6938ac357b6f0a4ce6e
+size 8704
diff --git a/data/2025/2504_05xxx/2504.05979/images/5d3c5fa7d9c2e13ae59f463d54f3c900277b032d69c97c5dd725781b8fc54200.jpg b/data/2025/2504_05xxx/2504.05979/images/5d3c5fa7d9c2e13ae59f463d54f3c900277b032d69c97c5dd725781b8fc54200.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b0bb74258bae952163edadb0e5f75be88140e520
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5d3c5fa7d9c2e13ae59f463d54f3c900277b032d69c97c5dd725781b8fc54200.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3f211ffcce31d2361a028f6df5daa1c56cdbd8eca60fd5f03d6e07d003ebeed
+size 14577
diff --git a/data/2025/2504_05xxx/2504.05979/images/5d41134c1d56df0d9d4e13222e77a77b5332513dbf0e28ef10e50470295d17e3.jpg b/data/2025/2504_05xxx/2504.05979/images/5d41134c1d56df0d9d4e13222e77a77b5332513dbf0e28ef10e50470295d17e3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ebf000288a6f089d79e8b7f75f97f73ce01a06fe
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5d41134c1d56df0d9d4e13222e77a77b5332513dbf0e28ef10e50470295d17e3.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22914c62517822566b61fb950735fe49dc87894c27b898eb9e9bdf8524546126
+size 12396
diff --git a/data/2025/2504_05xxx/2504.05979/images/5d4b6580226201ab5042c29511d6da5a3409b9f5c7fee1d2d9c074d14f64765b.jpg b/data/2025/2504_05xxx/2504.05979/images/5d4b6580226201ab5042c29511d6da5a3409b9f5c7fee1d2d9c074d14f64765b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8a4eb5d9d2118bbfe65283f81a1e5be811428044
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5d4b6580226201ab5042c29511d6da5a3409b9f5c7fee1d2d9c074d14f64765b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa343404f05a2b52bd040183268734c56cad8e0a9346f664efa8e0299fa1546c
+size 8277
diff --git a/data/2025/2504_05xxx/2504.05979/images/5d604baed8d657358ea3a3aa09a0796cbab115ba03ae7dc21bef555277a6b92c.jpg b/data/2025/2504_05xxx/2504.05979/images/5d604baed8d657358ea3a3aa09a0796cbab115ba03ae7dc21bef555277a6b92c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..533cfdbfd2440c56bb2e1059b472e1e61b0743b9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5d604baed8d657358ea3a3aa09a0796cbab115ba03ae7dc21bef555277a6b92c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0be9a36ceaa1d0a9674220ff98a99022af53a13b6162e4f61fd91fe66fe4227
+size 15165
diff --git a/data/2025/2504_05xxx/2504.05979/images/5d70486874bdca6ec3533ff018df819168b3864b389e7def95b35ea1d5fd3941.jpg b/data/2025/2504_05xxx/2504.05979/images/5d70486874bdca6ec3533ff018df819168b3864b389e7def95b35ea1d5fd3941.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d499e8a9183e8d9ba4e62f2c3021b2442f80ae91
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5d70486874bdca6ec3533ff018df819168b3864b389e7def95b35ea1d5fd3941.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4789836a0e0928042a7ab7b83de41e35848e0c6a5681926f0095f88cb3edcca
+size 11883
diff --git a/data/2025/2504_05xxx/2504.05979/images/5d871e96ad190ac8245ba7e97f4356f76317189e597d17530c30c48b2ef31192.jpg b/data/2025/2504_05xxx/2504.05979/images/5d871e96ad190ac8245ba7e97f4356f76317189e597d17530c30c48b2ef31192.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fbd7f43a47b246e32b957b8c6a87ea6d3ed7c4ad
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5d871e96ad190ac8245ba7e97f4356f76317189e597d17530c30c48b2ef31192.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffbefa20228a9dc117ef9e0ac8bf34846f60a4322b28e0d2c020abef816f937a
+size 13191
diff --git a/data/2025/2504_05xxx/2504.05979/images/5daa456ff2e454b0f91f88203eed2a998e30a44e393325bd1cc55a49937d3939.jpg b/data/2025/2504_05xxx/2504.05979/images/5daa456ff2e454b0f91f88203eed2a998e30a44e393325bd1cc55a49937d3939.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..babd96527144da1742ce0c6333b6659f91b93642
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5daa456ff2e454b0f91f88203eed2a998e30a44e393325bd1cc55a49937d3939.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f60dc9bc16bfe7312ed242cb6a97f6697aecf0bdddaa6682ae607a2cf5ed520
+size 2848
diff --git a/data/2025/2504_05xxx/2504.05979/images/5daddf44b2febe0e6103c50d850ffed87ab4624a8da3303ae0a44eb83321b1ed.jpg b/data/2025/2504_05xxx/2504.05979/images/5daddf44b2febe0e6103c50d850ffed87ab4624a8da3303ae0a44eb83321b1ed.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2380d33247d5dfc36e62e7786cde3216ffc9d17f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5daddf44b2febe0e6103c50d850ffed87ab4624a8da3303ae0a44eb83321b1ed.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95da3a6ac7c11cddbb307a86b0d566d6c8df44a86a139fdb6f0f03e810face83
+size 9160
diff --git a/data/2025/2504_05xxx/2504.05979/images/5df66609cbc1920bfd7bfab7731dee320fbf9bd64ef127bfe114205f6384aaa1.jpg b/data/2025/2504_05xxx/2504.05979/images/5df66609cbc1920bfd7bfab7731dee320fbf9bd64ef127bfe114205f6384aaa1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7b0068e0af0c7bcc57d643af09d2e03b1a153dac
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5df66609cbc1920bfd7bfab7731dee320fbf9bd64ef127bfe114205f6384aaa1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25610845e674e43ec4f7eefd43b198423299677ad0fe8bb34dc289517e4cb556
+size 942
diff --git a/data/2025/2504_05xxx/2504.05979/images/5e416a9762e2def779608eb1bf6eee5a9cee49745a628b45ccc17073ed461705.jpg b/data/2025/2504_05xxx/2504.05979/images/5e416a9762e2def779608eb1bf6eee5a9cee49745a628b45ccc17073ed461705.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bed7da8fbaaaac86841063886921a094ce6354d1
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5e416a9762e2def779608eb1bf6eee5a9cee49745a628b45ccc17073ed461705.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:120f64799a6953945c32346dd6e9bab40dd8d8d84033899efb0411a306ff1c49
+size 919
diff --git a/data/2025/2504_05xxx/2504.05979/images/5ef2471bae5d6ac28f80fd9606df44d177b953395bf0f4d7328ae08ca174b1ee.jpg b/data/2025/2504_05xxx/2504.05979/images/5ef2471bae5d6ac28f80fd9606df44d177b953395bf0f4d7328ae08ca174b1ee.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c639b00310dc6de03906aad08aacb9dccda4336a
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5ef2471bae5d6ac28f80fd9606df44d177b953395bf0f4d7328ae08ca174b1ee.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c2500df5d96dfdfca23cac7ca46301c1c38ddea501c740a8bf47387b971e350
+size 11752
diff --git a/data/2025/2504_05xxx/2504.05979/images/5f022a9d8700e671b60295a4ae5dd57ff7c7747fa42c0d5b9d6fb09f4fe1b014.jpg b/data/2025/2504_05xxx/2504.05979/images/5f022a9d8700e671b60295a4ae5dd57ff7c7747fa42c0d5b9d6fb09f4fe1b014.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1718a4baf255ca126c062c97ca2b8245b2006b13
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5f022a9d8700e671b60295a4ae5dd57ff7c7747fa42c0d5b9d6fb09f4fe1b014.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4895ab99f567da7d3caf5e129d50643b40961d7a9f7840679bb6b8ecf92955e
+size 5895
diff --git a/data/2025/2504_05xxx/2504.05979/images/5f20c93306fe70d43acd29c720c491e8d5d808966bbaa918395e1a87b2755d64.jpg b/data/2025/2504_05xxx/2504.05979/images/5f20c93306fe70d43acd29c720c491e8d5d808966bbaa918395e1a87b2755d64.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7ac2de48f448d1d53f8a8406696c6841c3823458
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5f20c93306fe70d43acd29c720c491e8d5d808966bbaa918395e1a87b2755d64.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4b861bec43ca561dc46d638cfe2a3446dee7f0920847cc72140242666507b89
+size 10602
diff --git a/data/2025/2504_05xxx/2504.05979/images/5fd37721ff6283442746e2e6bca6d7639893b65c0c252601a4e8feeaec442651.jpg b/data/2025/2504_05xxx/2504.05979/images/5fd37721ff6283442746e2e6bca6d7639893b65c0c252601a4e8feeaec442651.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..864940cdbe5e46376b07dc3118c4e7f4aaed695f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5fd37721ff6283442746e2e6bca6d7639893b65c0c252601a4e8feeaec442651.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:231e11d0e4e96c175ee0b140afaba477afbe2d3fd41bb76f95103a59c393faa0
+size 11580
diff --git a/data/2025/2504_05xxx/2504.05979/images/5fd98c00acb24bb685d7ae786468c021486683a40b42e8431c28d0d29da3cf89.jpg b/data/2025/2504_05xxx/2504.05979/images/5fd98c00acb24bb685d7ae786468c021486683a40b42e8431c28d0d29da3cf89.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f362281204ad51fdfd285ddbfa9f3eb12d32c304
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/5fd98c00acb24bb685d7ae786468c021486683a40b42e8431c28d0d29da3cf89.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7c82ef73c2880be6f33b95b5a6d3b87ac68b25e923a84522a44f127651a65fc
+size 11147
diff --git a/data/2025/2504_05xxx/2504.05979/images/600cb4b1adf7ff450977eb1ff9c1356ca0f2202a614d5297fdfa99e34eb010fc.jpg b/data/2025/2504_05xxx/2504.05979/images/600cb4b1adf7ff450977eb1ff9c1356ca0f2202a614d5297fdfa99e34eb010fc.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8d38a9594eb30ac48fe0f63f9283f893b386ff38
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/600cb4b1adf7ff450977eb1ff9c1356ca0f2202a614d5297fdfa99e34eb010fc.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20967e50808a75517a63ec502c5618c904ce079bcb3c8893d0c56144800ae349
+size 13889
diff --git a/data/2025/2504_05xxx/2504.05979/images/6043908cb4a51385380e40a2264f62f78e48ba2368469bbca5ad6f2dde9482cf.jpg b/data/2025/2504_05xxx/2504.05979/images/6043908cb4a51385380e40a2264f62f78e48ba2368469bbca5ad6f2dde9482cf.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..95584eeccf6d64984504b7c11e0c5e7f7e27a576
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/6043908cb4a51385380e40a2264f62f78e48ba2368469bbca5ad6f2dde9482cf.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d309e041556c50d6a94baf48c7167530a5bca449b2913fbab5194b052a970d6
+size 14155
diff --git a/data/2025/2504_05xxx/2504.05979/images/6075fec13975411e989e0044e4a2f0ee1c763038760b1eee8ab0a205a97ce7fd.jpg b/data/2025/2504_05xxx/2504.05979/images/6075fec13975411e989e0044e4a2f0ee1c763038760b1eee8ab0a205a97ce7fd.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..228cd6dbf883738c009ae1f5466b5627fa17a10a
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/6075fec13975411e989e0044e4a2f0ee1c763038760b1eee8ab0a205a97ce7fd.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b520090be9e2e24f94d91c33381dbfa9b0f150ff61cef3f3e458ddd110819d7
+size 11334
diff --git a/data/2025/2504_05xxx/2504.05979/images/6076be77d027d41534f7c4a4fd02e93499c2b66c1cccf37db5b69d7f522c9268.jpg b/data/2025/2504_05xxx/2504.05979/images/6076be77d027d41534f7c4a4fd02e93499c2b66c1cccf37db5b69d7f522c9268.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6259f2fd127b3c957a5e93ab3084254efc563fa8
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/6076be77d027d41534f7c4a4fd02e93499c2b66c1cccf37db5b69d7f522c9268.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90dae706a2814e3bb058c159aa713faf5239dcbb82e000029b7615dab60173b9
+size 14841
diff --git a/data/2025/2504_05xxx/2504.05979/images/607880b57a1f83f7d1721514860baaaa5af0252138afb508b72a65611fc2b893.jpg b/data/2025/2504_05xxx/2504.05979/images/607880b57a1f83f7d1721514860baaaa5af0252138afb508b72a65611fc2b893.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..189809417d12a9d6e7585dbae600014020e6e8e4
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/607880b57a1f83f7d1721514860baaaa5af0252138afb508b72a65611fc2b893.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d15e861f75dd7f018de42c9d56af6646d9a00ac76c23ceb8501518f844869725
+size 7128
diff --git a/data/2025/2504_05xxx/2504.05979/images/61a9787b52f106410ced18bbf723e46ef2d3db614a1b9796dd588397ad04206e.jpg b/data/2025/2504_05xxx/2504.05979/images/61a9787b52f106410ced18bbf723e46ef2d3db614a1b9796dd588397ad04206e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a1cd7e865c52f80c575690974365c280ea7afe64
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/61a9787b52f106410ced18bbf723e46ef2d3db614a1b9796dd588397ad04206e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8291798d4b5e4ec1eff61cf55b2d0554202855fc6f18c8064ed902e307d55883
+size 12565
diff --git a/data/2025/2504_05xxx/2504.05979/images/61b9d5a9c147c446387301c4f13acd7ceeccc06f653954410c1dc398d1fd4bc3.jpg b/data/2025/2504_05xxx/2504.05979/images/61b9d5a9c147c446387301c4f13acd7ceeccc06f653954410c1dc398d1fd4bc3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..170029a6eedc90aed6b8539b282b60439f993726
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/61b9d5a9c147c446387301c4f13acd7ceeccc06f653954410c1dc398d1fd4bc3.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a00f64a2d64fa9ffda014322c92bf257855ea5b2493396942038949e5777b579
+size 18462
diff --git a/data/2025/2504_05xxx/2504.05979/images/6217bbdb7e9c6d3d88bbced40c11ad8ad80d6cc7c0af8b951f80ab29474cc67e.jpg b/data/2025/2504_05xxx/2504.05979/images/6217bbdb7e9c6d3d88bbced40c11ad8ad80d6cc7c0af8b951f80ab29474cc67e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..55e4c8d166e5b6e7aaf23ad1c17b4e3102b9626c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/6217bbdb7e9c6d3d88bbced40c11ad8ad80d6cc7c0af8b951f80ab29474cc67e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:664fa5da8e282731a669c38d4ee9d462f19e9f071e48b962ccd0ca7097b701d6
+size 12660
diff --git a/data/2025/2504_05xxx/2504.05979/images/625aa6127eca041e5810dc4b46c00716834bba9bb7fb4e2613067d7135aba20f.jpg b/data/2025/2504_05xxx/2504.05979/images/625aa6127eca041e5810dc4b46c00716834bba9bb7fb4e2613067d7135aba20f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..75125ef5c52216b569a6825a16d42bc43b6247d8
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/625aa6127eca041e5810dc4b46c00716834bba9bb7fb4e2613067d7135aba20f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c2a7a812d5b4df9b4d5d12895227e1e58fc7410ec8c7a9a61d9f3be395a285c
+size 8379
diff --git a/data/2025/2504_05xxx/2504.05979/images/628f0ed3fe5c2872259b8c53ed502d9c944f34e65615f8483a616fc2cad70310.jpg b/data/2025/2504_05xxx/2504.05979/images/628f0ed3fe5c2872259b8c53ed502d9c944f34e65615f8483a616fc2cad70310.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..352292979f1e0e65dadab06fc193b2fa6858617e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/628f0ed3fe5c2872259b8c53ed502d9c944f34e65615f8483a616fc2cad70310.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a4d2824897df921f997bf7254ba5d8b14435e472236cca2b7e442b4fff05a8e
+size 4344
diff --git a/data/2025/2504_05xxx/2504.05979/images/62b37976bf23d0f522572b1d32ef2041be79cd68b5ed5fc0163a0fe71955417f.jpg b/data/2025/2504_05xxx/2504.05979/images/62b37976bf23d0f522572b1d32ef2041be79cd68b5ed5fc0163a0fe71955417f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a61b4bf8b3f82718ec46877e872d68f1fa627de0
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/62b37976bf23d0f522572b1d32ef2041be79cd68b5ed5fc0163a0fe71955417f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a15d9e2476e16eb33c282582837b83dc32cef65f661b5b66eed6596e540fccd6
+size 8388
diff --git a/data/2025/2504_05xxx/2504.05979/images/6353301ea1a4bd02c64e31effbe95a706a80640c23bedd6dff2583457c4933e0.jpg b/data/2025/2504_05xxx/2504.05979/images/6353301ea1a4bd02c64e31effbe95a706a80640c23bedd6dff2583457c4933e0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..db257dd4e6fecfb3c7465d7e9651fc7963fd934a
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/6353301ea1a4bd02c64e31effbe95a706a80640c23bedd6dff2583457c4933e0.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f181ff8989ef9c91ce3957c4e7f004f6581ad99810635e93816bad1dbe00278c
+size 5216
diff --git a/data/2025/2504_05xxx/2504.05979/images/63887670dc86e68d25f445742e627570bc0bce4be5ac6001afe0c309c31c6b0a.jpg b/data/2025/2504_05xxx/2504.05979/images/63887670dc86e68d25f445742e627570bc0bce4be5ac6001afe0c309c31c6b0a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4eae25d911f7004eeb9f2770b04908a2db6da9c5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/63887670dc86e68d25f445742e627570bc0bce4be5ac6001afe0c309c31c6b0a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e8de2b076d2d5b8f5c613527f43a5c367fbfc05f4f69217782194f7035f0ca2
+size 13949
diff --git a/data/2025/2504_05xxx/2504.05979/images/63a0fda286dbcf6aab1f70694ab0576b25d5fd247827bdfa027aca3ce2984b08.jpg b/data/2025/2504_05xxx/2504.05979/images/63a0fda286dbcf6aab1f70694ab0576b25d5fd247827bdfa027aca3ce2984b08.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b226b6abb79c938842769170241bd44b554faae9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/63a0fda286dbcf6aab1f70694ab0576b25d5fd247827bdfa027aca3ce2984b08.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b65d31cd77ccdb67036347d4486f89b88a042497dd93f0e06a5b6e188b793533
+size 10171
diff --git a/data/2025/2504_05xxx/2504.05979/images/63bf7481f04803305e25b194d0f6a798e29e4ad8347b13488f22af38472f7dbb.jpg b/data/2025/2504_05xxx/2504.05979/images/63bf7481f04803305e25b194d0f6a798e29e4ad8347b13488f22af38472f7dbb.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b997bd5383601513c256d442a1813187e3dacdc3
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/63bf7481f04803305e25b194d0f6a798e29e4ad8347b13488f22af38472f7dbb.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38dd319b14de426911d0d4e419facb6a897b828cef5b61b65eaa37999b06930d
+size 9750
diff --git a/data/2025/2504_05xxx/2504.05979/images/63d21e13c3a1a35a1fd1c9cedc021755050cc1398455173eaf4da3af031229aa.jpg b/data/2025/2504_05xxx/2504.05979/images/63d21e13c3a1a35a1fd1c9cedc021755050cc1398455173eaf4da3af031229aa.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2f5ac29a3f6129649fe026723578b62bf0f65946
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/63d21e13c3a1a35a1fd1c9cedc021755050cc1398455173eaf4da3af031229aa.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d4b7727792c768e3718d61a229ef6e4db3e975e6d8a40545fc0009f009b0f4f
+size 16226
diff --git a/data/2025/2504_05xxx/2504.05979/images/646e79ac9f1f81f041625ce775812c9530257492f90e5afea41349ce5cd6894a.jpg b/data/2025/2504_05xxx/2504.05979/images/646e79ac9f1f81f041625ce775812c9530257492f90e5afea41349ce5cd6894a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8ca89cb4129f856651a4e7eedec9a8d265ec5fed
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/646e79ac9f1f81f041625ce775812c9530257492f90e5afea41349ce5cd6894a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19b59ee8f0f6f4a3c1bf682ea49fc1d25d98f776ee5da77a57efde8a53f0c4c4
+size 997
diff --git a/data/2025/2504_05xxx/2504.05979/images/6487fa3903724c091ac75d960053975fd920521719237fbe538abf890e5b7d14.jpg b/data/2025/2504_05xxx/2504.05979/images/6487fa3903724c091ac75d960053975fd920521719237fbe538abf890e5b7d14.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..af3b700c88238787d7820cbb4b7455547458a928
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/6487fa3903724c091ac75d960053975fd920521719237fbe538abf890e5b7d14.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc286d5458af7b9fcf113bc988115fdff6cf74f7a183ce564b8eb112758577f2
+size 13680
diff --git a/data/2025/2504_05xxx/2504.05979/images/64bca9f9e552b5aa559f2be8f97c1db4addda01d6db566245d3a344dc004fa2b.jpg b/data/2025/2504_05xxx/2504.05979/images/64bca9f9e552b5aa559f2be8f97c1db4addda01d6db566245d3a344dc004fa2b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..dd40048c7158fe021e7184890310effda4e66d5c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/64bca9f9e552b5aa559f2be8f97c1db4addda01d6db566245d3a344dc004fa2b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98522098863fa68709cb172fbc3b428749b2de327fa5fdcfa6d6046aa5b77a36
+size 12793
diff --git a/data/2025/2504_05xxx/2504.05979/images/64e6d603fb4bb84b5ebce439ab2fdb3c6e0e566e8af8982c1c50d5378b6fe487.jpg b/data/2025/2504_05xxx/2504.05979/images/64e6d603fb4bb84b5ebce439ab2fdb3c6e0e566e8af8982c1c50d5378b6fe487.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a029fdfa8bf25212631764a265aec25d2684239d
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/64e6d603fb4bb84b5ebce439ab2fdb3c6e0e566e8af8982c1c50d5378b6fe487.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ccc1937134b575b935f7a991ce49c3f7a1ebe4e3681c74e42680f902716d51d
+size 15734
diff --git a/data/2025/2504_05xxx/2504.05979/images/64f0d91151654ae4516f7d303ba48c4e01706725697a79b75bb9494acae439d4.jpg b/data/2025/2504_05xxx/2504.05979/images/64f0d91151654ae4516f7d303ba48c4e01706725697a79b75bb9494acae439d4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c908e8011f6ea667d8ad91fa88997e189a03aff0
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/64f0d91151654ae4516f7d303ba48c4e01706725697a79b75bb9494acae439d4.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67a09c5f13fe8e7d4352f8e1b08f3849f9a035c3a3e7d410c143ba7ee589edaf
+size 8220
diff --git a/data/2025/2504_05xxx/2504.05979/images/652932d800457a58f1281b3ba28fa3df0773930a60325d39e942f44b98045c8e.jpg b/data/2025/2504_05xxx/2504.05979/images/652932d800457a58f1281b3ba28fa3df0773930a60325d39e942f44b98045c8e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..83d2c33018f37d5aec083ce8827b02698b4fd9c0
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/652932d800457a58f1281b3ba28fa3df0773930a60325d39e942f44b98045c8e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07eecc75a34d47ff9e517385dd088d0e1b1966a77925da3716aad9214214e22d
+size 7511
diff --git a/data/2025/2504_05xxx/2504.05979/images/65a3cacd9307277d2c15e32c391395ee4d10278b0cde3e7007004223bb26c6c9.jpg b/data/2025/2504_05xxx/2504.05979/images/65a3cacd9307277d2c15e32c391395ee4d10278b0cde3e7007004223bb26c6c9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..342db5d4eaeeed71abbf8b2c5dc73f021419c670
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/65a3cacd9307277d2c15e32c391395ee4d10278b0cde3e7007004223bb26c6c9.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:028ba148d2a5a97e66e7ddb36f9e91f6b154b04ff51d2d103819c03a755bfe1b
+size 5069
diff --git a/data/2025/2504_05xxx/2504.05979/images/65ad8232e82ff1d205196c6cd60b284ad9f6d0f84803d62c20199cec22907447.jpg b/data/2025/2504_05xxx/2504.05979/images/65ad8232e82ff1d205196c6cd60b284ad9f6d0f84803d62c20199cec22907447.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4ceadf6231d9c921cdbf1030ec723b3b7adeb22f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/65ad8232e82ff1d205196c6cd60b284ad9f6d0f84803d62c20199cec22907447.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e49ad675a7c857d4dbc4ff2cbca858b6567d130e6036a713a21e6631b987356c
+size 13665
diff --git a/data/2025/2504_05xxx/2504.05979/images/662a88ccc330476d2b5521ea30b676e65a0af1c7ce47b24527e5328cb5d23989.jpg b/data/2025/2504_05xxx/2504.05979/images/662a88ccc330476d2b5521ea30b676e65a0af1c7ce47b24527e5328cb5d23989.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3068215ad9b03db615b7c4732a806f274cc15ab6
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/662a88ccc330476d2b5521ea30b676e65a0af1c7ce47b24527e5328cb5d23989.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:859a36341d677c2d59ac3f2d641fac9cb5e38ca0d94da9603788cdbe7c435fe9
+size 17407
diff --git a/data/2025/2504_05xxx/2504.05979/images/666712e651824dc36d467ea97f632d637319b03414823161f4a04e13a521f767.jpg b/data/2025/2504_05xxx/2504.05979/images/666712e651824dc36d467ea97f632d637319b03414823161f4a04e13a521f767.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9aa2157cfd1e26d86c04c0452fd2585b5d24caf5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/666712e651824dc36d467ea97f632d637319b03414823161f4a04e13a521f767.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07a2945a31ce7274853f8eba179086b75d88f3d576761e695eca2f28478942a1
+size 11395
diff --git a/data/2025/2504_05xxx/2504.05979/images/678977bfcc7731282c3cf0706d2a04a055df0dfc7950b35c45d46e6ffafa9252.jpg b/data/2025/2504_05xxx/2504.05979/images/678977bfcc7731282c3cf0706d2a04a055df0dfc7950b35c45d46e6ffafa9252.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d018aa1208856e3faa4c9ceb02d9766a886884a7
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/678977bfcc7731282c3cf0706d2a04a055df0dfc7950b35c45d46e6ffafa9252.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:399c98d41d5c17babed3ff416859a56025183322c81a2db84fefafa6e916e36a
+size 999
diff --git a/data/2025/2504_05xxx/2504.05979/images/679acf7cda0f660094595d465faedbcf9b28ca58cbf88966cb6e491aa278d75a.jpg b/data/2025/2504_05xxx/2504.05979/images/679acf7cda0f660094595d465faedbcf9b28ca58cbf88966cb6e491aa278d75a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0f7b36b2f59759b9b6d1946ff5d504d95bcf8e36
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/679acf7cda0f660094595d465faedbcf9b28ca58cbf88966cb6e491aa278d75a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1258ca6d3f8ee8bfe1784d307900d5d0f094418316ef61807b9dcec54c09712
+size 13084
diff --git a/data/2025/2504_05xxx/2504.05979/images/67a212cd49f8bf5728997494e42d4d92f076bc6f5c1e66b1dd42cfdcaaf6779c.jpg b/data/2025/2504_05xxx/2504.05979/images/67a212cd49f8bf5728997494e42d4d92f076bc6f5c1e66b1dd42cfdcaaf6779c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fe4ce8ee926ee361b42f226183a8b8f21643daf8
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/67a212cd49f8bf5728997494e42d4d92f076bc6f5c1e66b1dd42cfdcaaf6779c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:473f294b9a4b65cc1aa9d54f4a8f4ed37354a368683713810dbf5b3338990946
+size 10703
diff --git a/data/2025/2504_05xxx/2504.05979/images/67c90170597a34e84c1311ea2c3f363b473991486ba6bf0a15957d95db598921.jpg b/data/2025/2504_05xxx/2504.05979/images/67c90170597a34e84c1311ea2c3f363b473991486ba6bf0a15957d95db598921.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..92b543c0191298fb3425643c8ee0c4d48090fd0f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/67c90170597a34e84c1311ea2c3f363b473991486ba6bf0a15957d95db598921.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7ef152334a3abd19cb1e257d42a76f42a3a70e67075a485441a2952c42f201e
+size 15129
diff --git a/data/2025/2504_05xxx/2504.05979/images/67dd368a9fc83144d81f03926e3e616fb1c6241004b301cf8c36f99e0c45dd8d.jpg b/data/2025/2504_05xxx/2504.05979/images/67dd368a9fc83144d81f03926e3e616fb1c6241004b301cf8c36f99e0c45dd8d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2d16c4bcfb1eca904c565d760c39d5769ffa09e3
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/67dd368a9fc83144d81f03926e3e616fb1c6241004b301cf8c36f99e0c45dd8d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ca1b61fe937389505c1180eeb8b70cf9be1edb2b4594e4318883561d7921c4f
+size 11347
diff --git a/data/2025/2504_05xxx/2504.05979/images/68285fa202a5fdaa42f6a5b5c9a5ad11300451918903881288401c4c6d69b8ef.jpg b/data/2025/2504_05xxx/2504.05979/images/68285fa202a5fdaa42f6a5b5c9a5ad11300451918903881288401c4c6d69b8ef.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2ef94dbe8b886c5921efceb946941be968f2e39c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/68285fa202a5fdaa42f6a5b5c9a5ad11300451918903881288401c4c6d69b8ef.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe5950d7f71accaa319b0c781448fdcd0802c888476c56f9a1c56ca67b20f447
+size 15434
sha256:fe5950d7f71accaa319b0c781448fdcd0802c888476c56f9a1c56ca67b20f447 +size 15434 diff --git a/data/2025/2504_05xxx/2504.05979/images/689143eb07d759080356a1acc014ea1f91236ddde377299fa31b78f9f34ed9bc.jpg b/data/2025/2504_05xxx/2504.05979/images/689143eb07d759080356a1acc014ea1f91236ddde377299fa31b78f9f34ed9bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f33a3756cfa59da29779e190d29c5026fdc6552b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/689143eb07d759080356a1acc014ea1f91236ddde377299fa31b78f9f34ed9bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af246b6040242ddbacd4e355db5841b23632e65efe5c47e04953f78400eba5e7 +size 5047 diff --git a/data/2025/2504_05xxx/2504.05979/images/69763d65a9a155bb94fbc23e0c07823b4fbfe54ae0150f8bf1838f34853a3bce.jpg b/data/2025/2504_05xxx/2504.05979/images/69763d65a9a155bb94fbc23e0c07823b4fbfe54ae0150f8bf1838f34853a3bce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4d2a36bea743bf9b2b3f380525fc8fc616d2c59 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/69763d65a9a155bb94fbc23e0c07823b4fbfe54ae0150f8bf1838f34853a3bce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d348fc0bb54a92b9c2443704896d87d482053d55b98ca287674cbaa009f5f3c +size 5761 diff --git a/data/2025/2504_05xxx/2504.05979/images/69d386fd0754ccef0b8aa405ddd5368febd5edd73da8504508220e1ee0fb3ce3.jpg b/data/2025/2504_05xxx/2504.05979/images/69d386fd0754ccef0b8aa405ddd5368febd5edd73da8504508220e1ee0fb3ce3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31c9fb401dd187ab46de2ae25c32011d4b3cfb18 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/69d386fd0754ccef0b8aa405ddd5368febd5edd73da8504508220e1ee0fb3ce3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14d253c93657099c9a9af6f10d1fa621999e5235961e3b0f81d89866418c3dea +size 15054 diff --git a/data/2025/2504_05xxx/2504.05979/images/69fc7b667b5296c731fc241b01cedfd07a9c8e3fb9b10cdf2db89fc2a34aef2f.jpg b/data/2025/2504_05xxx/2504.05979/images/69fc7b667b5296c731fc241b01cedfd07a9c8e3fb9b10cdf2db89fc2a34aef2f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd55937c17c218cdc49bff2e2d8f617981ed2d02 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/69fc7b667b5296c731fc241b01cedfd07a9c8e3fb9b10cdf2db89fc2a34aef2f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1edbc678ea92a9dbfe8ed5cce25a873b7e0d154c297070b5b6c75ded92fe93dc +size 348699 diff --git a/data/2025/2504_05xxx/2504.05979/images/6a5555b889ead9b7c54a679f8554efb5478d55c2bbab8a2783cd24e9f0e28abb.jpg b/data/2025/2504_05xxx/2504.05979/images/6a5555b889ead9b7c54a679f8554efb5478d55c2bbab8a2783cd24e9f0e28abb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9239f93f7403f9a336abaaf723c42be9b7af1325 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6a5555b889ead9b7c54a679f8554efb5478d55c2bbab8a2783cd24e9f0e28abb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d17a8a2a6d141d0d19a819104117ea878aa8c726ab17e3dc609967a330e45865 +size 12254 diff --git a/data/2025/2504_05xxx/2504.05979/images/6a5d14299085340af2af8df55c7d78da1c76cf94e7c40072a4aa894710067730.jpg b/data/2025/2504_05xxx/2504.05979/images/6a5d14299085340af2af8df55c7d78da1c76cf94e7c40072a4aa894710067730.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4e22732e7ac4c57819a62b14ab4db40a932e63a3 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/6a5d14299085340af2af8df55c7d78da1c76cf94e7c40072a4aa894710067730.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b547f29146545427d3f0e8e1a45d0acddabe17166547dab4b2c78c96669fe02 +size 9588 diff --git a/data/2025/2504_05xxx/2504.05979/images/6ac4b0f6100f66c6e3a3675e85edf101192eaa5e7afac67acbc73dad433f98df.jpg b/data/2025/2504_05xxx/2504.05979/images/6ac4b0f6100f66c6e3a3675e85edf101192eaa5e7afac67acbc73dad433f98df.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b09ec7d14eae4dd97d7b6e7e3968aa1c48af20a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6ac4b0f6100f66c6e3a3675e85edf101192eaa5e7afac67acbc73dad433f98df.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1b583899011b5e5b455e989c8d4c790d0c04cc0760397c37a33502f6d9f07ad +size 8682 diff --git a/data/2025/2504_05xxx/2504.05979/images/6af19de892cfb5e57fdbd96c732a8b33020d3b991fab34b219b07d9804454a8f.jpg b/data/2025/2504_05xxx/2504.05979/images/6af19de892cfb5e57fdbd96c732a8b33020d3b991fab34b219b07d9804454a8f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab29687368128606958465f4dd7e8daefc261436 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6af19de892cfb5e57fdbd96c732a8b33020d3b991fab34b219b07d9804454a8f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d541e9749690d00ee8e59273bbdd82bd814c0cd8d7b1f2b8845575d927e188f +size 10951 diff --git a/data/2025/2504_05xxx/2504.05979/images/6b14e73a8dcd42c4119885d683c4b57a1f2394b2ba3d7cfed0c412d829af1eb7.jpg b/data/2025/2504_05xxx/2504.05979/images/6b14e73a8dcd42c4119885d683c4b57a1f2394b2ba3d7cfed0c412d829af1eb7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56e5243be4553e18f73a6bb4783fa130c850cc6e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6b14e73a8dcd42c4119885d683c4b57a1f2394b2ba3d7cfed0c412d829af1eb7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acd708ec0444d0ea40537a7916f4f2d24e2b3eaf1b2aebf2ec0537321c75ea89 +size 10770 diff --git a/data/2025/2504_05xxx/2504.05979/images/6b407e152dd4b4cace6037d3643ed856c1e206632e4bbccf517c4fef9f21d5ff.jpg b/data/2025/2504_05xxx/2504.05979/images/6b407e152dd4b4cace6037d3643ed856c1e206632e4bbccf517c4fef9f21d5ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..511790e805cdbfe1313a5e2b250d5c826c5b2e4b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6b407e152dd4b4cace6037d3643ed856c1e206632e4bbccf517c4fef9f21d5ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7557825d0f5c9068bce5bf5b07d8063f237fb9b3043cf76bc77d812e0f5c8c5 +size 12264 diff --git a/data/2025/2504_05xxx/2504.05979/images/6b703d57fd8284c322132b56c18ad285d5a472bca055f06fdf9ddb620574ba3c.jpg b/data/2025/2504_05xxx/2504.05979/images/6b703d57fd8284c322132b56c18ad285d5a472bca055f06fdf9ddb620574ba3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f767ebae38eddaf01fb8e7d8d25dad87c924e28 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6b703d57fd8284c322132b56c18ad285d5a472bca055f06fdf9ddb620574ba3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07c446c9878113bb13807c57bbf2f2d0d3ec507664126a5dd0ffc49277ea8c16 +size 6673 diff --git a/data/2025/2504_05xxx/2504.05979/images/6b9df00e9d9d77d03f6e7d0327a679c41ab300965c8160ed8995d13466569736.jpg 
b/data/2025/2504_05xxx/2504.05979/images/6b9df00e9d9d77d03f6e7d0327a679c41ab300965c8160ed8995d13466569736.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f719912d6c9578309f456701fc5f8ba7925ef7b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6b9df00e9d9d77d03f6e7d0327a679c41ab300965c8160ed8995d13466569736.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a51ab21bddef03504cba51bc75f367d609885a81b1db580928fb7a0c0d730c4 +size 13265 diff --git a/data/2025/2504_05xxx/2504.05979/images/6bd024f68b4e3ada55b3eb581d5cca2d29125289402827758186fcce06573946.jpg b/data/2025/2504_05xxx/2504.05979/images/6bd024f68b4e3ada55b3eb581d5cca2d29125289402827758186fcce06573946.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54c28cee59b53d31ddb4b12bfcd01d251d33bb98 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6bd024f68b4e3ada55b3eb581d5cca2d29125289402827758186fcce06573946.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9d6a36d158e383d39bf21fa4d2437ca2122e02e32929f01749a128cff6462bb +size 14268 diff --git a/data/2025/2504_05xxx/2504.05979/images/6bd20b3ebf4a99d1a659fb4acf85915aae6729d1dae0be2eba9584761c9e9db7.jpg b/data/2025/2504_05xxx/2504.05979/images/6bd20b3ebf4a99d1a659fb4acf85915aae6729d1dae0be2eba9584761c9e9db7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df96ad3ad249039b45ca7b1953c91bb7d3b8f591 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6bd20b3ebf4a99d1a659fb4acf85915aae6729d1dae0be2eba9584761c9e9db7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d753f4b13ab3aa004847421939da9b6af2f34f9d4759562786c7fd8c3657584e +size 9102 diff --git a/data/2025/2504_05xxx/2504.05979/images/6bf819c8a7dad12a034e9854583b927e93387ef6c95c36f44d5e4a7ebbd9eef8.jpg b/data/2025/2504_05xxx/2504.05979/images/6bf819c8a7dad12a034e9854583b927e93387ef6c95c36f44d5e4a7ebbd9eef8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ebbe2fa430e767ea51f88870ad051dbdc0b2b23d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6bf819c8a7dad12a034e9854583b927e93387ef6c95c36f44d5e4a7ebbd9eef8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db1e1153efda6d5d86cbe2b6126b49c21a8d653d74e4722e6b7e96837f824814 +size 10259 diff --git a/data/2025/2504_05xxx/2504.05979/images/6cecd99cb28dc56f82b143d8d1bfc81a9ffb34558b21ff22a2629368564a9e5f.jpg b/data/2025/2504_05xxx/2504.05979/images/6cecd99cb28dc56f82b143d8d1bfc81a9ffb34558b21ff22a2629368564a9e5f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0622ff73097f0325069f8561f2d7b124f022f9c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6cecd99cb28dc56f82b143d8d1bfc81a9ffb34558b21ff22a2629368564a9e5f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab8a1a9f20807f32ace374ce71b392ed0348a58d07c51facdb79760aa1ad17b3 +size 14704 diff --git a/data/2025/2504_05xxx/2504.05979/images/6e8cda0469a84878dd4813f102a0c8e09c7e5e7f7b3d81edb91ab19c428a22a1.jpg b/data/2025/2504_05xxx/2504.05979/images/6e8cda0469a84878dd4813f102a0c8e09c7e5e7f7b3d81edb91ab19c428a22a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..239f42937caa2e21a447127755c6f7193eedbb74 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6e8cda0469a84878dd4813f102a0c8e09c7e5e7f7b3d81edb91ab19c428a22a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:2cdaff3838021c2e9dc825e72892e35eecd0d29236d52ff2039ae884158cb854 +size 10671 diff --git a/data/2025/2504_05xxx/2504.05979/images/6e8d8fa6a7fc992e352dc0100c93543a6693728f8a6fe64aac51f54c477a6736.jpg b/data/2025/2504_05xxx/2504.05979/images/6e8d8fa6a7fc992e352dc0100c93543a6693728f8a6fe64aac51f54c477a6736.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87ca9fb04142d130592969cc7295fcbbf19de4b7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6e8d8fa6a7fc992e352dc0100c93543a6693728f8a6fe64aac51f54c477a6736.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5daa3ec405f17eb66af9e1cdf830fc047957971ae8a9363393ea446e1c340daa +size 17521 diff --git a/data/2025/2504_05xxx/2504.05979/images/6e8e2c78c4556e12bb41898d5bd695dbc241b0124a7f8964bc5853e2c5f14dcc.jpg b/data/2025/2504_05xxx/2504.05979/images/6e8e2c78c4556e12bb41898d5bd695dbc241b0124a7f8964bc5853e2c5f14dcc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..02a7be45d8ed64a4cf1d20bb76f2dcc50135eca3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6e8e2c78c4556e12bb41898d5bd695dbc241b0124a7f8964bc5853e2c5f14dcc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ced377993cbcd8eaabce394a0e949b98e980956753f65dd4edc1aceeefb4cb93 +size 9752 diff --git a/data/2025/2504_05xxx/2504.05979/images/6ec2422a775f3f017131805ebc131bf30d8d32953e433ab7a7e35ce013d814bd.jpg b/data/2025/2504_05xxx/2504.05979/images/6ec2422a775f3f017131805ebc131bf30d8d32953e433ab7a7e35ce013d814bd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f563721be8563b3c0e8b0663fcad0163a0bd549 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6ec2422a775f3f017131805ebc131bf30d8d32953e433ab7a7e35ce013d814bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f54e8874087b0151122e054471c3553a6ad6579c61886bc1575f60bb9305f63b +size 14336 diff --git a/data/2025/2504_05xxx/2504.05979/images/6f7393447d4c0dd9cc43b403fb767ccae61d13073cbf4e7b6de1e3bbffbc5cfd.jpg b/data/2025/2504_05xxx/2504.05979/images/6f7393447d4c0dd9cc43b403fb767ccae61d13073cbf4e7b6de1e3bbffbc5cfd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ab3e1a641b7ab002182a22c1469c58ef02a6534 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/6f7393447d4c0dd9cc43b403fb767ccae61d13073cbf4e7b6de1e3bbffbc5cfd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d9a4aeaca083fa4d898595f96570e01b3e96301631ebcab8101af68acd0fcd6 +size 8563 diff --git a/data/2025/2504_05xxx/2504.05979/images/70346a54c2888c50ce4473234e1e5a51c19e9b3df375364d24bb2a98a5a7153a.jpg b/data/2025/2504_05xxx/2504.05979/images/70346a54c2888c50ce4473234e1e5a51c19e9b3df375364d24bb2a98a5a7153a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9accb8cf57c2944ef34588f38cce6624725a3a07 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/70346a54c2888c50ce4473234e1e5a51c19e9b3df375364d24bb2a98a5a7153a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:038be5139b99a584e28ae5ad143783a3f7beb42dce04a68a82a8f6d74ea922e3 +size 8486 diff --git a/data/2025/2504_05xxx/2504.05979/images/70b3977d31710e5b25126e9195e5448eee93a4c761b993c52e1db1cb26c59702.jpg b/data/2025/2504_05xxx/2504.05979/images/70b3977d31710e5b25126e9195e5448eee93a4c761b993c52e1db1cb26c59702.jpg new file mode 100644 index 0000000000000000000000000000000000000000..439b9177588430628e6955ff8dbabfc47fe39c55 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/70b3977d31710e5b25126e9195e5448eee93a4c761b993c52e1db1cb26c59702.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8a2a776a4015f09fcaa07321b84cc71e6f369f8e2b87b9cbe21da69986309a1 +size 14853 diff --git a/data/2025/2504_05xxx/2504.05979/images/70e1fc27eedb02031d28c9885667468714afce1b37646dd33a83ef114dce4e78.jpg b/data/2025/2504_05xxx/2504.05979/images/70e1fc27eedb02031d28c9885667468714afce1b37646dd33a83ef114dce4e78.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13f7c0bad9f374c0f0f83bc6132fd6815740268e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/70e1fc27eedb02031d28c9885667468714afce1b37646dd33a83ef114dce4e78.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5edf335301eaebc6869888b9f797623462a7ec409632a40637536fe8463d7ef5 +size 9511 diff --git a/data/2025/2504_05xxx/2504.05979/images/70e41d785dd49e7ac3f98b75bebf61abc711768215fa3f8fc2105d70e0467d9d.jpg b/data/2025/2504_05xxx/2504.05979/images/70e41d785dd49e7ac3f98b75bebf61abc711768215fa3f8fc2105d70e0467d9d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f7772b0502547655730892a50eb8e5348fd2fbd9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/70e41d785dd49e7ac3f98b75bebf61abc711768215fa3f8fc2105d70e0467d9d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e096120ade1ec49e403842b72e11d4fc27aa0bf9805da5a9bcef3b0cb7b53c6 +size 11883 diff --git a/data/2025/2504_05xxx/2504.05979/images/72211f5127f3ea905b058ad3d92ebb6f194bea63cc537e32cd09a144926d10d7.jpg b/data/2025/2504_05xxx/2504.05979/images/72211f5127f3ea905b058ad3d92ebb6f194bea63cc537e32cd09a144926d10d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1141763d59f2a6a7bdbf529c6629498287773007 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/72211f5127f3ea905b058ad3d92ebb6f194bea63cc537e32cd09a144926d10d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:200da2ff169ca61687a2ed7409e34efa440f9375db88bf3f11084b91521acb24 +size 6800 diff --git a/data/2025/2504_05xxx/2504.05979/images/73484faa2e3dc80819041f2a89b22046ef3ff187c2c48e51c3cfc6d540a16621.jpg b/data/2025/2504_05xxx/2504.05979/images/73484faa2e3dc80819041f2a89b22046ef3ff187c2c48e51c3cfc6d540a16621.jpg new file mode 100644 index 0000000000000000000000000000000000000000..12ead3e7d0c45131796b3e181a8c9539efee2ae2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/73484faa2e3dc80819041f2a89b22046ef3ff187c2c48e51c3cfc6d540a16621.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c241bc65bc16d11c06dda35e3c6aa4543ab62963c09eedc67fbd304588638e9 +size 7394 diff --git a/data/2025/2504_05xxx/2504.05979/images/7397b99d3dbd4f1f9b6ee30e701e62c0f7e12919bfa41da478790936477b2c34.jpg b/data/2025/2504_05xxx/2504.05979/images/7397b99d3dbd4f1f9b6ee30e701e62c0f7e12919bfa41da478790936477b2c34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e350ddf0b4951e07cb4632839e0932d8659988f0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7397b99d3dbd4f1f9b6ee30e701e62c0f7e12919bfa41da478790936477b2c34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fce835e53341118b8139855e23e767e8d4ba57bd116071a17ae1c68b9479a47 +size 9915 diff --git a/data/2025/2504_05xxx/2504.05979/images/73a85fa3c615d1faa5d51ccafc720c7d1d72730add58f7b85b6f81378602781d.jpg 
b/data/2025/2504_05xxx/2504.05979/images/73a85fa3c615d1faa5d51ccafc720c7d1d72730add58f7b85b6f81378602781d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c3be7a710fbb22e1ad48508ecdcd21dd9f6604d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/73a85fa3c615d1faa5d51ccafc720c7d1d72730add58f7b85b6f81378602781d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb35f3306da3c7bd6b4812abbde64bf06011c47b20e223ef33497dc327397cd4 +size 6546 diff --git a/data/2025/2504_05xxx/2504.05979/images/73cf810872486276d591d5946a9ef3dc5549af56b3140dbc1d46cd76cf11381f.jpg b/data/2025/2504_05xxx/2504.05979/images/73cf810872486276d591d5946a9ef3dc5549af56b3140dbc1d46cd76cf11381f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3c0fcc11e2b7aa8ecefe7fc81db5f0a7230bbed --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/73cf810872486276d591d5946a9ef3dc5549af56b3140dbc1d46cd76cf11381f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf16d247fb32a6095ad1098e3d73b5e9545a6b5bf7bf6f9d5345212fd6264b4a +size 8961 diff --git a/data/2025/2504_05xxx/2504.05979/images/742bbe6bc7ddca81a49887d3b626885d9ffe8e90e72e044edaaa388cf541ef05.jpg b/data/2025/2504_05xxx/2504.05979/images/742bbe6bc7ddca81a49887d3b626885d9ffe8e90e72e044edaaa388cf541ef05.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4e3dcd07a7faa4cc47a2c18a65ec0a833752072 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/742bbe6bc7ddca81a49887d3b626885d9ffe8e90e72e044edaaa388cf541ef05.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4950fd1fd5bceaf562034914ebd67eba1cb63563def4429ee47eaf51f25f5d8a +size 13773 diff --git a/data/2025/2504_05xxx/2504.05979/images/742bdab0de5d04587ee79a5c9d93f90cfb4494e5a48dcf258a1406c6265bd627.jpg b/data/2025/2504_05xxx/2504.05979/images/742bdab0de5d04587ee79a5c9d93f90cfb4494e5a48dcf258a1406c6265bd627.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42dd76fce27fedac062ec25136b535b2c2e5ed7f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/742bdab0de5d04587ee79a5c9d93f90cfb4494e5a48dcf258a1406c6265bd627.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffda8a49c052b49c46cbb0927b0bba22e02a31cc18f5b6a681d36246a2c72f8b +size 17209 diff --git a/data/2025/2504_05xxx/2504.05979/images/745ba50ac275ead363177aa6a6389523a01b4c236062fe89cb966d79aeafe69b.jpg b/data/2025/2504_05xxx/2504.05979/images/745ba50ac275ead363177aa6a6389523a01b4c236062fe89cb966d79aeafe69b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d35c1b81d33c1132ed4fa783b2a1bb057b64d07 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/745ba50ac275ead363177aa6a6389523a01b4c236062fe89cb966d79aeafe69b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b90237bc70877ba1a7a3fa5c72abb06990217716442769fddd227133d2cf6009 +size 16372 diff --git a/data/2025/2504_05xxx/2504.05979/images/74777175c863d9f366b45a7843d8eb9164d1d6ccee90d1c36c565abd5a6cd450.jpg b/data/2025/2504_05xxx/2504.05979/images/74777175c863d9f366b45a7843d8eb9164d1d6ccee90d1c36c565abd5a6cd450.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40aaf822bdbbbb0b48e533996440699ba6f35a2e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/74777175c863d9f366b45a7843d8eb9164d1d6ccee90d1c36c565abd5a6cd450.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a0f23cb25115d47b471fcf2cce12e05fb88c0f030026a08aed736ed7949317e4 +size 5906 diff --git a/data/2025/2504_05xxx/2504.05979/images/74da7612dd6d92ecd657e1f44bdbfc6138a44b8f5bad9b26166f4cd12e4bcb65.jpg b/data/2025/2504_05xxx/2504.05979/images/74da7612dd6d92ecd657e1f44bdbfc6138a44b8f5bad9b26166f4cd12e4bcb65.jpg new file mode 100644 index 0000000000000000000000000000000000000000..201c9c155e8b9ebd71e54865596b122b3ebd8658 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/74da7612dd6d92ecd657e1f44bdbfc6138a44b8f5bad9b26166f4cd12e4bcb65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:211df05c3854d1baf0e0514829795accc8e458c6c699cec51ef01f8c36366a2d +size 8206 diff --git a/data/2025/2504_05xxx/2504.05979/images/750636006f7b07b0af11afa452e7037a2d9a06614ababcd014be1498c416fd65.jpg b/data/2025/2504_05xxx/2504.05979/images/750636006f7b07b0af11afa452e7037a2d9a06614ababcd014be1498c416fd65.jpg new file mode 100644 index 0000000000000000000000000000000000000000..180445ebb1ef802a9fbdc0726005ccdd61db9325 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/750636006f7b07b0af11afa452e7037a2d9a06614ababcd014be1498c416fd65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6134e358f7548f97a4806c9e08620b6bb39821bb8fcee9e90d41ca20a1bb469 +size 11527 diff --git a/data/2025/2504_05xxx/2504.05979/images/751b8b0b723ad8d89ba3583fc6932d850eb7cf8a76f7215ed994d255821fb63e.jpg b/data/2025/2504_05xxx/2504.05979/images/751b8b0b723ad8d89ba3583fc6932d850eb7cf8a76f7215ed994d255821fb63e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45f178139370d3c35a9b18ed4ea1492c66477544 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/751b8b0b723ad8d89ba3583fc6932d850eb7cf8a76f7215ed994d255821fb63e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab714aad9c3cdb94f4ac319bb76ffe4670b15ad1d9c6fdf6e84e6af35828af27 +size 947 diff --git a/data/2025/2504_05xxx/2504.05979/images/7617257b54d51121e2b378531c54403d5f093b828cb72764f28cbaf8e2d75466.jpg b/data/2025/2504_05xxx/2504.05979/images/7617257b54d51121e2b378531c54403d5f093b828cb72764f28cbaf8e2d75466.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa506e88e49e7f1eba77c20ee3dee36636dc4d14 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7617257b54d51121e2b378531c54403d5f093b828cb72764f28cbaf8e2d75466.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19c7d35fca287a9e2d20656e4b0ff8ef943ab20a71b44f8b32fdab79b3ac28f3 +size 5395 diff --git a/data/2025/2504_05xxx/2504.05979/images/76238724fe5435d83934f4d9ceaa07fd016a8c9970f6377cd6eb7afa1409a5d4.jpg b/data/2025/2504_05xxx/2504.05979/images/76238724fe5435d83934f4d9ceaa07fd016a8c9970f6377cd6eb7afa1409a5d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e49c42112748558ce1e7298a68fe4d5a426cadd5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/76238724fe5435d83934f4d9ceaa07fd016a8c9970f6377cd6eb7afa1409a5d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d176786e39eacd7f6b48044bc14b44b064005b59eea77749cb26d35d881cb96b +size 4978 diff --git a/data/2025/2504_05xxx/2504.05979/images/763c96045715d809c9e422f9aa615105eac65d7140529630622d8010d75de173.jpg b/data/2025/2504_05xxx/2504.05979/images/763c96045715d809c9e422f9aa615105eac65d7140529630622d8010d75de173.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7363694b871209d7b7000b44227a834a125de665 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/763c96045715d809c9e422f9aa615105eac65d7140529630622d8010d75de173.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07a7b094d47fa957c11acffc1dcc4351eee77af7a549d3b5e6ecda66b6449728 +size 13562 diff --git a/data/2025/2504_05xxx/2504.05979/images/7665c0eede3baa10374170e0e9e1926291832c424771b68766df8bf95ef3f8f9.jpg b/data/2025/2504_05xxx/2504.05979/images/7665c0eede3baa10374170e0e9e1926291832c424771b68766df8bf95ef3f8f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4085fd130826a77d8ad5da4fa847051c99c3913a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7665c0eede3baa10374170e0e9e1926291832c424771b68766df8bf95ef3f8f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f749be2a27822764d05e1c40a1adb42fdb9413e33fff325599b9f64a84c01c6f +size 24488 diff --git a/data/2025/2504_05xxx/2504.05979/images/76849dc2c4d82c6863264e63ab6e8135e1109cad0f3635b978fc71a1be56563d.jpg b/data/2025/2504_05xxx/2504.05979/images/76849dc2c4d82c6863264e63ab6e8135e1109cad0f3635b978fc71a1be56563d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db041c2b9932612496666699727bb142c5ffe2b1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/76849dc2c4d82c6863264e63ab6e8135e1109cad0f3635b978fc71a1be56563d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4627298d2b95b4210ef7ca853258fb1b40d1d1f162424ab4b042b43dc761601 +size 4911 diff --git a/data/2025/2504_05xxx/2504.05979/images/76af15fc690cc0e7fcb7cc802c4e12a650c1bda8daa750afc68a7050560ab699.jpg b/data/2025/2504_05xxx/2504.05979/images/76af15fc690cc0e7fcb7cc802c4e12a650c1bda8daa750afc68a7050560ab699.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9261f45884ee0a119e3172ce81eee41dd8724b03 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/76af15fc690cc0e7fcb7cc802c4e12a650c1bda8daa750afc68a7050560ab699.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba1a74db631e55163926c9db62d1ef12259a92d7fc021c1607d8d9b34a32ccd5 +size 10097 diff --git a/data/2025/2504_05xxx/2504.05979/images/76b3ffacb274a3c770ba9db5c2fcea3609020e8b054535fa7f050994e7337844.jpg b/data/2025/2504_05xxx/2504.05979/images/76b3ffacb274a3c770ba9db5c2fcea3609020e8b054535fa7f050994e7337844.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4e2f1087b45e5d7b017f53ec7ecdfd0905ce9cd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/76b3ffacb274a3c770ba9db5c2fcea3609020e8b054535fa7f050994e7337844.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1afafc2016b871362f1bd6905ceacf6f2d722034ec57e4f38b693d2f8a8c5f68 +size 17907 diff --git a/data/2025/2504_05xxx/2504.05979/images/770b79989d7eeca4a1a1dfe41749ee7619d15b0809497750a4ef3f48ae31bf1c.jpg b/data/2025/2504_05xxx/2504.05979/images/770b79989d7eeca4a1a1dfe41749ee7619d15b0809497750a4ef3f48ae31bf1c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29c9f61f6ff80b4fbe01b1781d0798725a3a680a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/770b79989d7eeca4a1a1dfe41749ee7619d15b0809497750a4ef3f48ae31bf1c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77ffc055684df56e3c159a7bb4d69e85ae12ab9197da57a9e866cc990ff0bbc4 +size 12314 diff --git a/data/2025/2504_05xxx/2504.05979/images/774a2f8158ca42b89d06121387988ac2fa21eab2708a59d2a40c47aeeefeea74.jpg 
b/data/2025/2504_05xxx/2504.05979/images/774a2f8158ca42b89d06121387988ac2fa21eab2708a59d2a40c47aeeefeea74.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8f5170bb1ae8985180077003a9de654480c8e0e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/774a2f8158ca42b89d06121387988ac2fa21eab2708a59d2a40c47aeeefeea74.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae8fd4dfb16f4d667aca1fa1e7048c1efebe5ccd3fb88dbd1856269a764e6e2c +size 16056 diff --git a/data/2025/2504_05xxx/2504.05979/images/777c4e2019957c2a3491ab6ec8e5dbcb0731c4d26a35844fe6edc39ec5266953.jpg b/data/2025/2504_05xxx/2504.05979/images/777c4e2019957c2a3491ab6ec8e5dbcb0731c4d26a35844fe6edc39ec5266953.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2e090144c4ee19a6fd35c7ad36050f58945c723 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/777c4e2019957c2a3491ab6ec8e5dbcb0731c4d26a35844fe6edc39ec5266953.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7d651ddae081880bf387e21da2adabfd356e5f5ca050f99f55bde21ed037898 +size 4972 diff --git a/data/2025/2504_05xxx/2504.05979/images/777ec73ae37e58c0b5c2111e81cad23a6a071d747be202d05b8e482979d29b98.jpg b/data/2025/2504_05xxx/2504.05979/images/777ec73ae37e58c0b5c2111e81cad23a6a071d747be202d05b8e482979d29b98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9c2d0c568df8d7c58484832544e9e3c639ecd80b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/777ec73ae37e58c0b5c2111e81cad23a6a071d747be202d05b8e482979d29b98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7a5cba78ed21f5b605bcecc9714846935f7952ba1e3a8e0a9f8a008ebfd22d1 +size 14801 diff --git a/data/2025/2504_05xxx/2504.05979/images/784eb3154634368e8aff984cdd03ed74a47d81368a315b4c0bbe827a0051146d.jpg b/data/2025/2504_05xxx/2504.05979/images/784eb3154634368e8aff984cdd03ed74a47d81368a315b4c0bbe827a0051146d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..997a276c0154b213f0c7653a9f91c2461c2f2a67 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/784eb3154634368e8aff984cdd03ed74a47d81368a315b4c0bbe827a0051146d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6838b70217a78d9347486cd59497801a95fe93616fbf59f1cc5db743e52977f1 +size 12159 diff --git a/data/2025/2504_05xxx/2504.05979/images/787c6e3871eb657b7ad2619b07c940f4253e90dda0a171100d9c16bb8ce5179e.jpg b/data/2025/2504_05xxx/2504.05979/images/787c6e3871eb657b7ad2619b07c940f4253e90dda0a171100d9c16bb8ce5179e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d6d9d4ef4f0c5e4d3b6e19e1b024814d8894fa3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/787c6e3871eb657b7ad2619b07c940f4253e90dda0a171100d9c16bb8ce5179e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85b32f9ac28f47541e5af426f8b49dbd091ddf3cb5c1e473344ea93cab677d0c +size 14181 diff --git a/data/2025/2504_05xxx/2504.05979/images/7968ea927b3c105ef0585b8c69f50258a2601dd8140b619a7c907020835befb5.jpg b/data/2025/2504_05xxx/2504.05979/images/7968ea927b3c105ef0585b8c69f50258a2601dd8140b619a7c907020835befb5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2519fcd2221e43dbd896eb3f1b8a2bcc8da96e16 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7968ea927b3c105ef0585b8c69f50258a2601dd8140b619a7c907020835befb5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6f05cbc1a97c4b108f7922c00559854cf9fa07781856b6f3c86db5218bdae680 +size 36354 diff --git a/data/2025/2504_05xxx/2504.05979/images/7a43c02aec8d995221ba9a8b29fbfb292e2b680c4330dfae1451b1cbd1e28f15.jpg b/data/2025/2504_05xxx/2504.05979/images/7a43c02aec8d995221ba9a8b29fbfb292e2b680c4330dfae1451b1cbd1e28f15.jpg new file mode 100644 index 0000000000000000000000000000000000000000..446fc961fb2dfe74369e915b93e2f16a6dc9801f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7a43c02aec8d995221ba9a8b29fbfb292e2b680c4330dfae1451b1cbd1e28f15.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb1b7ea1e84ec3048487f73cf54bfe8c8a7c6fc6ea1991669014889f3f4ca0e8 +size 11649 diff --git a/data/2025/2504_05xxx/2504.05979/images/7a80ead376e4c816f64243eb12d3db5695241cc5988afc361964743b675d9e97.jpg b/data/2025/2504_05xxx/2504.05979/images/7a80ead376e4c816f64243eb12d3db5695241cc5988afc361964743b675d9e97.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31e21b4afa6c1dfbc06a62f473a2ad2036120a2f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7a80ead376e4c816f64243eb12d3db5695241cc5988afc361964743b675d9e97.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90d4d37d22790f0df144d120005b3b7f9e4313f8601e2c06d2ed06b80b446ecb +size 11130 diff --git a/data/2025/2504_05xxx/2504.05979/images/7add957f9d7531f536cfef81731bc6b28f75e12995ba9dbfe3e545658a323c7d.jpg b/data/2025/2504_05xxx/2504.05979/images/7add957f9d7531f536cfef81731bc6b28f75e12995ba9dbfe3e545658a323c7d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db7181b41091f186fceb3b10ff0f9351a9b7f19a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7add957f9d7531f536cfef81731bc6b28f75e12995ba9dbfe3e545658a323c7d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb13cf296b60d31f16aec044258f7a441adbb2127e5f7724ceb79475c306d698 +size 10969 diff --git a/data/2025/2504_05xxx/2504.05979/images/7af8979fc8ec37fdc42425294aa893ae45a15b13d8806f2f8c4eac75eae9952b.jpg b/data/2025/2504_05xxx/2504.05979/images/7af8979fc8ec37fdc42425294aa893ae45a15b13d8806f2f8c4eac75eae9952b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52494a42f70531e68d03c241c053b44ddc1ba2b4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7af8979fc8ec37fdc42425294aa893ae45a15b13d8806f2f8c4eac75eae9952b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0aa9b953bd5c19b1a120deaecf78876736d11d32f402ad0700614002240848d8 +size 17507 diff --git a/data/2025/2504_05xxx/2504.05979/images/7b45aa773deb1657966b4ed79bd2b961c599f2a4ba279796de04e8ddce076662.jpg b/data/2025/2504_05xxx/2504.05979/images/7b45aa773deb1657966b4ed79bd2b961c599f2a4ba279796de04e8ddce076662.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6e2d3e9444ef02782af371b6e2f5a7c662c06c8d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7b45aa773deb1657966b4ed79bd2b961c599f2a4ba279796de04e8ddce076662.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:253f6d1b0142c5e4666e04da79a5cd2ceb79595ca0f057ce3badfbf2e525aa5f +size 15282 diff --git a/data/2025/2504_05xxx/2504.05979/images/7b4abe90aad35a4b1cd796581e661c9ef27e071aff9b1800f3b7341066fed935.jpg b/data/2025/2504_05xxx/2504.05979/images/7b4abe90aad35a4b1cd796581e661c9ef27e071aff9b1800f3b7341066fed935.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43cf16af2f46b98dd4fad1908fc8b62716d18033 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/7b4abe90aad35a4b1cd796581e661c9ef27e071aff9b1800f3b7341066fed935.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2158ccc8166ab79cd4c84092f8499142f0f5ed0c6484f8f3daac936e3a6a8c39 +size 13375 diff --git a/data/2025/2504_05xxx/2504.05979/images/7b58c3184e1b97ea204211d53437c27406bef28c23fb6a50586f6c0f1a5bfe24.jpg b/data/2025/2504_05xxx/2504.05979/images/7b58c3184e1b97ea204211d53437c27406bef28c23fb6a50586f6c0f1a5bfe24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b6082db1d87860ada9e1a81224a127f92228afa8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7b58c3184e1b97ea204211d53437c27406bef28c23fb6a50586f6c0f1a5bfe24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bc3daa58ef8808f805a3d76b1c489b62bc1738c289db8ad3abc3478dc8bf435 +size 8468 diff --git a/data/2025/2504_05xxx/2504.05979/images/7b6fe8843d08c99ead6fe3a01df0d1b2466ad14914ede1de9b670e85d3294614.jpg b/data/2025/2504_05xxx/2504.05979/images/7b6fe8843d08c99ead6fe3a01df0d1b2466ad14914ede1de9b670e85d3294614.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c440b51c6ff8fc74bda9ded20fb513b1af3f460d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7b6fe8843d08c99ead6fe3a01df0d1b2466ad14914ede1de9b670e85d3294614.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:431acefa9f5da46825398d4eb201f57440aa692ee46e92faff4cf0bf5763352b +size 16007 diff --git a/data/2025/2504_05xxx/2504.05979/images/7b779ff8fb58b3aae04af13166a3b1967efcf87892bd378886be402ea8763601.jpg b/data/2025/2504_05xxx/2504.05979/images/7b779ff8fb58b3aae04af13166a3b1967efcf87892bd378886be402ea8763601.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78930e526f724f61600621e52cfac456841fafb2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7b779ff8fb58b3aae04af13166a3b1967efcf87892bd378886be402ea8763601.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5a9899bdbbafb9e2a9dfecf3f979ef0df481adfe6d9ebb9431a393967757c7a +size 4688 diff --git a/data/2025/2504_05xxx/2504.05979/images/7b9ec0dd32124c06ec282d499d80f72d58efd6c6b8339cfbdfe41eea456580d6.jpg b/data/2025/2504_05xxx/2504.05979/images/7b9ec0dd32124c06ec282d499d80f72d58efd6c6b8339cfbdfe41eea456580d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa3fc2ff8773463b9f14b910ee2c4d7b56fbd5ab --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7b9ec0dd32124c06ec282d499d80f72d58efd6c6b8339cfbdfe41eea456580d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe985931d5daa658f7037dc6c31c28128398fc278a2a8db1edf8e9ae756404bb +size 14278 diff --git a/data/2025/2504_05xxx/2504.05979/images/7ba0cc1081bd824ad107543a5dbe34527a4105ea1c1e9c55a5359263cccada3f.jpg b/data/2025/2504_05xxx/2504.05979/images/7ba0cc1081bd824ad107543a5dbe34527a4105ea1c1e9c55a5359263cccada3f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99eda36d8bd33fbfa510ff1cedd88ab830ddadbd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7ba0cc1081bd824ad107543a5dbe34527a4105ea1c1e9c55a5359263cccada3f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d367ddad829b46b061ab55bd55e2c491c586de13bc03ab72cbaaf707988f6fef +size 4207 diff --git a/data/2025/2504_05xxx/2504.05979/images/7ba7f124d58c88bee0135fc235b0aebd8b6426909a755ac1f40c313c7c9cad81.jpg 
b/data/2025/2504_05xxx/2504.05979/images/7ba7f124d58c88bee0135fc235b0aebd8b6426909a755ac1f40c313c7c9cad81.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f37f7e9d1433f1240224facd6d1306cb12fef758 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7ba7f124d58c88bee0135fc235b0aebd8b6426909a755ac1f40c313c7c9cad81.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bee307b54049acefade97ef61f6749ecc7a831470191108419119d420f58cf37 +size 6694 diff --git a/data/2025/2504_05xxx/2504.05979/images/7be4bdc21bc3e0a0da4304091bcaaf819d4c752aa483ba4d25c1add751eaadcc.jpg b/data/2025/2504_05xxx/2504.05979/images/7be4bdc21bc3e0a0da4304091bcaaf819d4c752aa483ba4d25c1add751eaadcc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b0b954fff1743e9283210a67fa8059d1845d384 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7be4bdc21bc3e0a0da4304091bcaaf819d4c752aa483ba4d25c1add751eaadcc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8f6304e06205acdf29bf084f287a3b4625863fd7aa71ad1785db2bb466e4c18 +size 13257 diff --git a/data/2025/2504_05xxx/2504.05979/images/7bf3e8e8fcd88c5d02dc116f91c2aa8891be20f22eda6960e71e5c24543006c7.jpg b/data/2025/2504_05xxx/2504.05979/images/7bf3e8e8fcd88c5d02dc116f91c2aa8891be20f22eda6960e71e5c24543006c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cebf502fdab57d23c3519752f2274fc3cfcc02f7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7bf3e8e8fcd88c5d02dc116f91c2aa8891be20f22eda6960e71e5c24543006c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94a7be95292cde3cb986b41f205211e3ddb07b804d450a5b0dea7faa90095aea +size 16575 diff --git a/data/2025/2504_05xxx/2504.05979/images/7c0c2c96243ce0e8cc18e9a4df91dbe8e7777df73105a2ceb18d177a514d9735.jpg b/data/2025/2504_05xxx/2504.05979/images/7c0c2c96243ce0e8cc18e9a4df91dbe8e7777df73105a2ceb18d177a514d9735.jpg new file mode 100644 index 0000000000000000000000000000000000000000..026c11d61666a8d86fdc9f40ff3b41416a4c87f6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7c0c2c96243ce0e8cc18e9a4df91dbe8e7777df73105a2ceb18d177a514d9735.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a518c1e0f3300f0d9c9f4edc33aa372b2627f5848ade3f16a714268715d3e424 +size 7931 diff --git a/data/2025/2504_05xxx/2504.05979/images/7c6e1db18970fe110e86871683e72f4fceaeb6a8c3e0c16fe2dfb612916f8a04.jpg b/data/2025/2504_05xxx/2504.05979/images/7c6e1db18970fe110e86871683e72f4fceaeb6a8c3e0c16fe2dfb612916f8a04.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20e09a4cd3ef4737f13092beb4b8125c638610b8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7c6e1db18970fe110e86871683e72f4fceaeb6a8c3e0c16fe2dfb612916f8a04.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:807c312558dc9bf643d820c97b05d040c93a60558bda7a1576b1d84b7b915dc6 +size 16132 diff --git a/data/2025/2504_05xxx/2504.05979/images/7c783389b6755b3395ca901bd7e2c2f77ed95615d85ca58ae5a381dd0a4cbd5e.jpg b/data/2025/2504_05xxx/2504.05979/images/7c783389b6755b3395ca901bd7e2c2f77ed95615d85ca58ae5a381dd0a4cbd5e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29cbcb75c2e3592c6b89ced0df28cbc80e9ae408 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7c783389b6755b3395ca901bd7e2c2f77ed95615d85ca58ae5a381dd0a4cbd5e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6aa7265c33d93201563fa408d38f8bddb210566359ebdd72960c1710a515fe4b +size 7632 diff --git a/data/2025/2504_05xxx/2504.05979/images/7c825dcf01a94678b59a1ac7f191122428497b7adf966110443aeb1232b68996.jpg b/data/2025/2504_05xxx/2504.05979/images/7c825dcf01a94678b59a1ac7f191122428497b7adf966110443aeb1232b68996.jpg new file mode 100644 index 0000000000000000000000000000000000000000..95e5f9f2c33b11e8d694d2ce2b7561fbddd8de6e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7c825dcf01a94678b59a1ac7f191122428497b7adf966110443aeb1232b68996.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97a2c6874d11410dc102c89f021e8c97c2d04be62137f160d67f89388516691f +size 9117 diff --git a/data/2025/2504_05xxx/2504.05979/images/7dd57547ecd2c2e9b99488cd75ff2d9c6688588f611f0c7b10caea435b7cf9dd.jpg b/data/2025/2504_05xxx/2504.05979/images/7dd57547ecd2c2e9b99488cd75ff2d9c6688588f611f0c7b10caea435b7cf9dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a9d0dd761ec038d78ce7d1dcf5dcda3c5a61144 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7dd57547ecd2c2e9b99488cd75ff2d9c6688588f611f0c7b10caea435b7cf9dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a05b1831536255537b3bc3f1617b1b4ddf1aa304b6f94b358bd9746911d8842 +size 16682 diff --git a/data/2025/2504_05xxx/2504.05979/images/7e5a6dca5061982b8b4f23f693eb84c50a375b77e068c461c42e68104c157985.jpg b/data/2025/2504_05xxx/2504.05979/images/7e5a6dca5061982b8b4f23f693eb84c50a375b77e068c461c42e68104c157985.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd77c4b28b1a557520340af60506d71b980b318c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7e5a6dca5061982b8b4f23f693eb84c50a375b77e068c461c42e68104c157985.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46d9a78f32dd521905ed4d0b3ca92b806772a2f5a4b267eb9044d877894158b3 +size 7301 diff --git a/data/2025/2504_05xxx/2504.05979/images/7e6bb403295173dc6c676365988dcd5fd4a615b8d4e09058c0895add12fbd47c.jpg b/data/2025/2504_05xxx/2504.05979/images/7e6bb403295173dc6c676365988dcd5fd4a615b8d4e09058c0895add12fbd47c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a05317500248842f4abde342f42ab80e073d4780 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7e6bb403295173dc6c676365988dcd5fd4a615b8d4e09058c0895add12fbd47c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9aec542aeec15bbb822ba51ea5b52f0037b1365b6d5dd33ceae7703e86dbfcc0 +size 5857 diff --git a/data/2025/2504_05xxx/2504.05979/images/7e7b185220c6a783809189a0317a2ebff5e610e53e31f229bc61cc66cb9833ff.jpg b/data/2025/2504_05xxx/2504.05979/images/7e7b185220c6a783809189a0317a2ebff5e610e53e31f229bc61cc66cb9833ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..518d70eb80b93fca57cc7954132ec36d4b5fdd18 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7e7b185220c6a783809189a0317a2ebff5e610e53e31f229bc61cc66cb9833ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3effb31af9ccb27360241a7432f945ec295312343fddb2f85be6f030fc94ec8d +size 7766 diff --git a/data/2025/2504_05xxx/2504.05979/images/7e92a2b49a69f9d2edbf84340e1fa8c4dbc2cf0e60655a02c6d49818f70c5d3d.jpg b/data/2025/2504_05xxx/2504.05979/images/7e92a2b49a69f9d2edbf84340e1fa8c4dbc2cf0e60655a02c6d49818f70c5d3d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..362b6ab4f34f771e73dcf241d2f8016253eefc68 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/7e92a2b49a69f9d2edbf84340e1fa8c4dbc2cf0e60655a02c6d49818f70c5d3d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a55375f7d7183697ebbf899f499df234f52be1258dae388153fa1d0e5b772dc8 +size 13019 diff --git a/data/2025/2504_05xxx/2504.05979/images/7ea2069c70699c15434b740761c5c8214d80002c7982c766b3b5d6af3f7b2f9d.jpg b/data/2025/2504_05xxx/2504.05979/images/7ea2069c70699c15434b740761c5c8214d80002c7982c766b3b5d6af3f7b2f9d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea0def0cc3536a6291d148e5ce3a005af1900f95 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7ea2069c70699c15434b740761c5c8214d80002c7982c766b3b5d6af3f7b2f9d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fff11e68465052cb40e1ec6c66821eae6c2b4bdb5216dca6808db05a0653fc0 +size 13584 diff --git a/data/2025/2504_05xxx/2504.05979/images/7ea976f449a8043690de06844c2b94aafce18ca58e6beea0e7e26544e3ab2899.jpg b/data/2025/2504_05xxx/2504.05979/images/7ea976f449a8043690de06844c2b94aafce18ca58e6beea0e7e26544e3ab2899.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee4a614756aeaf6f75774227925cbc05a283ae33 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7ea976f449a8043690de06844c2b94aafce18ca58e6beea0e7e26544e3ab2899.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84df734528f2c8bfff4ad052d31aa293af161443f71d8175fefc915e5a2d87a3 +size 9515 diff --git a/data/2025/2504_05xxx/2504.05979/images/7ed09698c0927be9038e1132fc7243c7e628f5e5c041880670c7d1ac190273ad.jpg b/data/2025/2504_05xxx/2504.05979/images/7ed09698c0927be9038e1132fc7243c7e628f5e5c041880670c7d1ac190273ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd34a5a9dc157b70f90247f334ecdb69de90e150 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7ed09698c0927be9038e1132fc7243c7e628f5e5c041880670c7d1ac190273ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0e3a7f75d0b914e394e382af5b162a8d96f59c54ef4b466457d936ba3672a50 +size 18661 diff --git a/data/2025/2504_05xxx/2504.05979/images/7ee4ab20360492c6594cd85f6e0806bdd6ac326a6b9f951620d2d3e338cdc66e.jpg b/data/2025/2504_05xxx/2504.05979/images/7ee4ab20360492c6594cd85f6e0806bdd6ac326a6b9f951620d2d3e338cdc66e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6db7daa157312c82c744dcaa10ce529516d29e7d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7ee4ab20360492c6594cd85f6e0806bdd6ac326a6b9f951620d2d3e338cdc66e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66719c0ad94d29c7d89513bbf6c73d9afde9f68b5476809aa61702d8082921c5 +size 941 diff --git a/data/2025/2504_05xxx/2504.05979/images/7f93176c11f8bb6fd47383525af946efe1f62c62e955b8c39ee12e4183f7af70.jpg b/data/2025/2504_05xxx/2504.05979/images/7f93176c11f8bb6fd47383525af946efe1f62c62e955b8c39ee12e4183f7af70.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb5cba979997f59ed0852562be658d77efbb2d04 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7f93176c11f8bb6fd47383525af946efe1f62c62e955b8c39ee12e4183f7af70.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfa56f0726374e988fd1220c1784071cf94a27e248ff7d2d43aa1d5bf34e40a5 +size 12065 diff --git a/data/2025/2504_05xxx/2504.05979/images/7fae08c7d278ec0abfd7dfa99c03539fe6218331e03f2ae1578f75bab7ed9747.jpg 
b/data/2025/2504_05xxx/2504.05979/images/7fae08c7d278ec0abfd7dfa99c03539fe6218331e03f2ae1578f75bab7ed9747.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0964009b3cd74bcb87f3b9760dc89ede5a74f73c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7fae08c7d278ec0abfd7dfa99c03539fe6218331e03f2ae1578f75bab7ed9747.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cb21f38cf3c0d71474b661292217df1c91c8703379a02a0780f5aa333746578 +size 10676 diff --git a/data/2025/2504_05xxx/2504.05979/images/7ff25ac80d044220739d61284585a070ee9142eede21fe8dbf47dff3cb2ffa9c.jpg b/data/2025/2504_05xxx/2504.05979/images/7ff25ac80d044220739d61284585a070ee9142eede21fe8dbf47dff3cb2ffa9c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..148cade73c99a66da50cf5d1c26fef5c51b75363 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/7ff25ac80d044220739d61284585a070ee9142eede21fe8dbf47dff3cb2ffa9c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbfc1210383f976c5d4f82c4a834f9588b61f9fbd4b48516f0d7c0ae898427b2 +size 954 diff --git a/data/2025/2504_05xxx/2504.05979/images/80922918a65d3be2dc64726ee8e795ae73b3e97479ae0a61fb7cbf01bf4cb9c1.jpg b/data/2025/2504_05xxx/2504.05979/images/80922918a65d3be2dc64726ee8e795ae73b3e97479ae0a61fb7cbf01bf4cb9c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96112a9cf83b5d9bdbea835457a131330523b196 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/80922918a65d3be2dc64726ee8e795ae73b3e97479ae0a61fb7cbf01bf4cb9c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c3baa59d0130e264b5de517c0e0a91706f3e1b603c3a81b7c30aedcd59209fd +size 948 diff --git a/data/2025/2504_05xxx/2504.05979/images/80963c1dbf62d49450d0bd1cbdf60c75eaa325a5af95ee28f199c5cd0699b838.jpg b/data/2025/2504_05xxx/2504.05979/images/80963c1dbf62d49450d0bd1cbdf60c75eaa325a5af95ee28f199c5cd0699b838.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d258b6cadb7f1068d3aa60084738b76697ef7039 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/80963c1dbf62d49450d0bd1cbdf60c75eaa325a5af95ee28f199c5cd0699b838.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d08ac358c2c442e6c3437eb4372a236866ab7d4406f1fa65e2fb5c9bbb457919 +size 9755 diff --git a/data/2025/2504_05xxx/2504.05979/images/8107bc9ed92dde4a8075837a95dd9e6711b30dd5e1a18eb7914511ac730f010f.jpg b/data/2025/2504_05xxx/2504.05979/images/8107bc9ed92dde4a8075837a95dd9e6711b30dd5e1a18eb7914511ac730f010f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f2f0afe66fafebfffeded061e9d6c8fea6dacd4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/8107bc9ed92dde4a8075837a95dd9e6711b30dd5e1a18eb7914511ac730f010f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af00565cde48a0b21d8b24c2711a9e2d4bff369cbcf9b2ea4b866a36353cadbc +size 7984 diff --git a/data/2025/2504_05xxx/2504.05979/images/8175a842bf2f363a8a70eb3f0550f36e400f3fdd4bd3f80909fb7879a8e130c1.jpg b/data/2025/2504_05xxx/2504.05979/images/8175a842bf2f363a8a70eb3f0550f36e400f3fdd4bd3f80909fb7879a8e130c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b64e7e67a4e9ae90c7b9c8120631c3d25944021 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/8175a842bf2f363a8a70eb3f0550f36e400f3fdd4bd3f80909fb7879a8e130c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:148ed16716346c91f889718a0569138f8840b3a2283c7c94700da965ec02d2bf
+size 14680
diff --git a/data/2025/2504_05xxx/2504.05979/images/81995073d133c3b619b48c6e202b90c68116f067eb1b41f3fb9d692f8ddaef05.jpg b/data/2025/2504_05xxx/2504.05979/images/81995073d133c3b619b48c6e202b90c68116f067eb1b41f3fb9d692f8ddaef05.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..88001a6908f3aec6b0c61e4c28e820ff30aa7d7e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/81995073d133c3b619b48c6e202b90c68116f067eb1b41f3fb9d692f8ddaef05.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88c4e375ec4154756a87192e5d55a393b3060fafd352a2c35a0bfacacad4cc6b
+size 8048
diff --git a/data/2025/2504_05xxx/2504.05979/images/81b21df800b94c90849050cd7c2fb977ff2ab50081bab2870fe2b7f18fd3d602.jpg b/data/2025/2504_05xxx/2504.05979/images/81b21df800b94c90849050cd7c2fb977ff2ab50081bab2870fe2b7f18fd3d602.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b2d16a0e9e71e3651765421c3e1d1fcfb95d4b0b
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/81b21df800b94c90849050cd7c2fb977ff2ab50081bab2870fe2b7f18fd3d602.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9441cb822e224d37f70ceae4adb900ac67a3ada4ec8e1c2c128f1d2f9fa9f3a
+size 8595
diff --git a/data/2025/2504_05xxx/2504.05979/images/8217fbf0fc579e87f7d27802daffe76c3b9b1cdc80d7434cf08598ef201ca65b.jpg b/data/2025/2504_05xxx/2504.05979/images/8217fbf0fc579e87f7d27802daffe76c3b9b1cdc80d7434cf08598ef201ca65b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..801301c063c25e811912127e95342d8d4f765d7b
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8217fbf0fc579e87f7d27802daffe76c3b9b1cdc80d7434cf08598ef201ca65b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d65ed53546ad334a99376fee78407f5f8c833cff2aecf704c0ded088f434518
+size 9228
diff --git a/data/2025/2504_05xxx/2504.05979/images/821b29da41b0b8024feaf0f1d198abc77e613c7dd6beb41244e3ee04340d25a7.jpg b/data/2025/2504_05xxx/2504.05979/images/821b29da41b0b8024feaf0f1d198abc77e613c7dd6beb41244e3ee04340d25a7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e1cac5d2a77c467039b4bdda27f68398b3118923
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/821b29da41b0b8024feaf0f1d198abc77e613c7dd6beb41244e3ee04340d25a7.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75e5f59f7ab425f26151e08566279ce324b529c1f3438aff78859ea4666287d7
+size 10917
diff --git a/data/2025/2504_05xxx/2504.05979/images/82452cfd1770b08e14ee6fdce267d83915ea906cdd31384da9f70cf3e1cf38c3.jpg b/data/2025/2504_05xxx/2504.05979/images/82452cfd1770b08e14ee6fdce267d83915ea906cdd31384da9f70cf3e1cf38c3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..96a44ce2247f359ba7f3093bf8e779272f9464ec
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/82452cfd1770b08e14ee6fdce267d83915ea906cdd31384da9f70cf3e1cf38c3.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56695b9a5538691f6eb1d30e6ba8f563b35deef6920648b0bcce35a0d9d3214d
+size 11393
diff --git a/data/2025/2504_05xxx/2504.05979/images/8280c76d3783095ac597ffe6112e8e97d4768a43158ffbeaed59287bec81116b.jpg b/data/2025/2504_05xxx/2504.05979/images/8280c76d3783095ac597ffe6112e8e97d4768a43158ffbeaed59287bec81116b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a85b115b6ed221eaae1730b145e472bc75a53d87
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8280c76d3783095ac597ffe6112e8e97d4768a43158ffbeaed59287bec81116b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:578a50d8629ac3e620e978c99fc1552c18421157ec8d27db12956b790d349ddb
+size 16720
diff --git a/data/2025/2504_05xxx/2504.05979/images/82a5f5ec145a93551e9693a7e4e48cedd5a986546f72da6f448a7c2c57ab92ee.jpg b/data/2025/2504_05xxx/2504.05979/images/82a5f5ec145a93551e9693a7e4e48cedd5a986546f72da6f448a7c2c57ab92ee.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..89bca9a1cb815460c61457581b6dab31a5f24ed5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/82a5f5ec145a93551e9693a7e4e48cedd5a986546f72da6f448a7c2c57ab92ee.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29a9f8d1dbcef300c4ad343dbf6033993fc802ebeb6d6f162119d23178c4194e
+size 11637
diff --git a/data/2025/2504_05xxx/2504.05979/images/82c794a5d723f0b36ec6540fc27ae81b30d682d013d9335993d95bcec71b79ae.jpg b/data/2025/2504_05xxx/2504.05979/images/82c794a5d723f0b36ec6540fc27ae81b30d682d013d9335993d95bcec71b79ae.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f80288572db2fd06fa62d13fffc30d17fa719c5d
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/82c794a5d723f0b36ec6540fc27ae81b30d682d013d9335993d95bcec71b79ae.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:caeff61c6f5f9086a4a5ebda73c2ef675e19e16973b4078b4f280020ced37bb3
+size 13086
diff --git a/data/2025/2504_05xxx/2504.05979/images/835c092bc8f3a790fdd3ce19ea243873bcd1e8c977f1e3b28cb6589fd8a4f735.jpg b/data/2025/2504_05xxx/2504.05979/images/835c092bc8f3a790fdd3ce19ea243873bcd1e8c977f1e3b28cb6589fd8a4f735.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9d4bba61c8839bdc84c7e54d823bb351e8c7b1a4
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/835c092bc8f3a790fdd3ce19ea243873bcd1e8c977f1e3b28cb6589fd8a4f735.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a5e5f6547373faf3aec8421b437e1e9823d79c085fd900cb61aaa783a42fe92
+size 10825
diff --git a/data/2025/2504_05xxx/2504.05979/images/838fcf545b25f35bfeec3bef84c6f0c0db6ecfcb9f88eaec03caefa49c2b0b1c.jpg b/data/2025/2504_05xxx/2504.05979/images/838fcf545b25f35bfeec3bef84c6f0c0db6ecfcb9f88eaec03caefa49c2b0b1c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3bbfd628799a6f70f9768aaa8932209bc792558f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/838fcf545b25f35bfeec3bef84c6f0c0db6ecfcb9f88eaec03caefa49c2b0b1c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b6b9508a20896039ca95bec18c1051779923cdba4d1a88d50d00a5ed274aa0f
+size 5698
diff --git a/data/2025/2504_05xxx/2504.05979/images/83a5063b90e2d85ae8723b1c0527ebfeb79cf418f10b31ccefd38f2bc7514bf1.jpg b/data/2025/2504_05xxx/2504.05979/images/83a5063b90e2d85ae8723b1c0527ebfeb79cf418f10b31ccefd38f2bc7514bf1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2b6a8dcb7eb3b90c38ec66b39f51bc15b7c504df
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/83a5063b90e2d85ae8723b1c0527ebfeb79cf418f10b31ccefd38f2bc7514bf1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:678bb3482e37151df4d70f77c053552e4720b3eb73f747411151d2d8e2aacd75
+size 4075
diff --git a/data/2025/2504_05xxx/2504.05979/images/83fabbd535e803f344bff99d3d53a6c8a71d95998a6a377d7bae9a19ebb0830a.jpg b/data/2025/2504_05xxx/2504.05979/images/83fabbd535e803f344bff99d3d53a6c8a71d95998a6a377d7bae9a19ebb0830a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bb646a1001b57f051d3de4b1c335b6f1b1fb5da9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/83fabbd535e803f344bff99d3d53a6c8a71d95998a6a377d7bae9a19ebb0830a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:000353374aaea212e909bc9fdfcb1a42b7736f9af79120360dc1bfebfc2bc9bd
+size 13871
diff --git a/data/2025/2504_05xxx/2504.05979/images/842316d408aba300ecb8a4b27bfcbba0807e6bf4b66c38430aceb0e69a3016e2.jpg b/data/2025/2504_05xxx/2504.05979/images/842316d408aba300ecb8a4b27bfcbba0807e6bf4b66c38430aceb0e69a3016e2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e4a6f8afd99cc2db265d2c49bcccc9ac6d7188e9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/842316d408aba300ecb8a4b27bfcbba0807e6bf4b66c38430aceb0e69a3016e2.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73c05c850069438a41420ee1a3af0905ed4cda48675fd6e64f121544964cd9dc
+size 873
diff --git a/data/2025/2504_05xxx/2504.05979/images/851db8a91140cd33caf2d0e7e2e00637b208f9f9b4690e97f12ffd0fbb726feb.jpg b/data/2025/2504_05xxx/2504.05979/images/851db8a91140cd33caf2d0e7e2e00637b208f9f9b4690e97f12ffd0fbb726feb.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a23e1e6f986388a137f017ce9d89f9abdb0fc3e1
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/851db8a91140cd33caf2d0e7e2e00637b208f9f9b4690e97f12ffd0fbb726feb.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf6655f82a3211d1125a6545923dcefb48cc9b7ed09c46743697a63c3bc2e18c
+size 7866
diff --git a/data/2025/2504_05xxx/2504.05979/images/855b2a4ebf86a04b3d8e45ed6cc6bfe5358ad56960cbb002301c9715b3a3062e.jpg b/data/2025/2504_05xxx/2504.05979/images/855b2a4ebf86a04b3d8e45ed6cc6bfe5358ad56960cbb002301c9715b3a3062e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b19ad9d3254a7247358f608fe24131146b795f9f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/855b2a4ebf86a04b3d8e45ed6cc6bfe5358ad56960cbb002301c9715b3a3062e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d2590f900d18b82d6983f93c92c83ca875f64f97756866b89a5ede8d582d715
+size 11587
diff --git a/data/2025/2504_05xxx/2504.05979/images/85f4dbb3e1869fc684c51a8d3da40afdad3cedebd548adaef1a8346cf9e23270.jpg b/data/2025/2504_05xxx/2504.05979/images/85f4dbb3e1869fc684c51a8d3da40afdad3cedebd548adaef1a8346cf9e23270.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9bff77977cb1ad416d825d59948dcfe626294aaa
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/85f4dbb3e1869fc684c51a8d3da40afdad3cedebd548adaef1a8346cf9e23270.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3b4ee4bd9f81422bd3da22d5271d671cfca90ad601517903f352e032545cb1f
+size 9649
diff --git a/data/2025/2504_05xxx/2504.05979/images/85f7bef2b593291c46513388cafafe9023a5e4f53bc7397e3a888a28d2cda80d.jpg b/data/2025/2504_05xxx/2504.05979/images/85f7bef2b593291c46513388cafafe9023a5e4f53bc7397e3a888a28d2cda80d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..125463e14742f4c244c3881a19408e4ce3f9ec69
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/85f7bef2b593291c46513388cafafe9023a5e4f53bc7397e3a888a28d2cda80d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8838a70af2e6f84f90c1045b0ea874ffe1065647be735dc4b54670f98a8ba457
+size 5586
diff --git a/data/2025/2504_05xxx/2504.05979/images/85f94fe9cc62a3b120dd03edaa6ef6f69b02976c1603bf7df955e05359a6cb30.jpg b/data/2025/2504_05xxx/2504.05979/images/85f94fe9cc62a3b120dd03edaa6ef6f69b02976c1603bf7df955e05359a6cb30.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ff8dc1e68c028327fe3b7f4011a8ac9c117c4551
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/85f94fe9cc62a3b120dd03edaa6ef6f69b02976c1603bf7df955e05359a6cb30.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a06f6d45d94d678a2a745a29b8479c88b79c3affb9631986c91f0a99a2e12910
+size 3388
diff --git a/data/2025/2504_05xxx/2504.05979/images/8619a8168756037dbe0d396ebd1fa868b9e5eebb02bfe0bc848ba11f025e59c5.jpg b/data/2025/2504_05xxx/2504.05979/images/8619a8168756037dbe0d396ebd1fa868b9e5eebb02bfe0bc848ba11f025e59c5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1f9fcf5ddbaa59212e7511f674587898c0f764e0
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8619a8168756037dbe0d396ebd1fa868b9e5eebb02bfe0bc848ba11f025e59c5.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b14d7a7a744588419122ed5f610fbbcccb623ca8ab0594b245886a5bac3f42a
+size 16566
diff --git a/data/2025/2504_05xxx/2504.05979/images/875d4fca774409cc984e2951d3b6c99aa12608a7a78632a41fcd84f6bd223082.jpg b/data/2025/2504_05xxx/2504.05979/images/875d4fca774409cc984e2951d3b6c99aa12608a7a78632a41fcd84f6bd223082.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c5d8890d73bbdf9ef825656a1dc1d2975d3be3b
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/875d4fca774409cc984e2951d3b6c99aa12608a7a78632a41fcd84f6bd223082.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fa3b3c0e62f4a619659aed25b27ee59113fe1c3c11c0b13a275f6c1c3b11e7d
+size 8995
diff --git a/data/2025/2504_05xxx/2504.05979/images/87edc272818725170e7b6db73459109f3c3967fc70ae14b1e2acffc417d4290d.jpg b/data/2025/2504_05xxx/2504.05979/images/87edc272818725170e7b6db73459109f3c3967fc70ae14b1e2acffc417d4290d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..dc08fd1ceacd03d9fd915856365e62ce89018596
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/87edc272818725170e7b6db73459109f3c3967fc70ae14b1e2acffc417d4290d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1776b5596bb8b3f640bd4f410b7ab6e7d5b98f371b31ee84546ad854c7b49fb9
+size 12919
diff --git a/data/2025/2504_05xxx/2504.05979/images/887ddccf2929d6f4372e9362922daad36a1526e449412d7bfea8a7165bea64b9.jpg b/data/2025/2504_05xxx/2504.05979/images/887ddccf2929d6f4372e9362922daad36a1526e449412d7bfea8a7165bea64b9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..047bc496c7fe1729169e1a99eb0f919d7fa2c0fd
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/887ddccf2929d6f4372e9362922daad36a1526e449412d7bfea8a7165bea64b9.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:145477470496963f5c4b5ab7c41f8f184001cc1fbf7b24d31a50356216c77ab1
+size 6242
diff --git a/data/2025/2504_05xxx/2504.05979/images/889fe3842b7457a957d4f92c06ccec58df898a9c56de1a94074c190c063efac7.jpg b/data/2025/2504_05xxx/2504.05979/images/889fe3842b7457a957d4f92c06ccec58df898a9c56de1a94074c190c063efac7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..32e9675963c3cc0854bced3bc68e113f886f4d4a
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/889fe3842b7457a957d4f92c06ccec58df898a9c56de1a94074c190c063efac7.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:359a155c980df4bf6904e74ab1b4d02980c3b0ec428364c3817b7f07e42182fb
+size 8193
diff --git a/data/2025/2504_05xxx/2504.05979/images/88aa98d1328c5f92799e7830df3cc592e78892873c7d450de34f064e06cadf11.jpg b/data/2025/2504_05xxx/2504.05979/images/88aa98d1328c5f92799e7830df3cc592e78892873c7d450de34f064e06cadf11.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a07e579893343101e699bfe3d8c0dbcc8449c742
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/88aa98d1328c5f92799e7830df3cc592e78892873c7d450de34f064e06cadf11.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:624d1d992f1184b7d4a365572024a6e96acb7a11b0e48a2a701ecf8b6e561dfb
+size 10042
diff --git a/data/2025/2504_05xxx/2504.05979/images/88eb6272e2083062c89d2da6799a7fbf0bebd2d74c908f4e8fab3e2024206b52.jpg b/data/2025/2504_05xxx/2504.05979/images/88eb6272e2083062c89d2da6799a7fbf0bebd2d74c908f4e8fab3e2024206b52.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..43fac7235137f71eaf40f087105e13609df583d7
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/88eb6272e2083062c89d2da6799a7fbf0bebd2d74c908f4e8fab3e2024206b52.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25c600c49effc7beea792cacd008d83fa65e6dbcc43997757abb9ff7959287f1
+size 8106
diff --git a/data/2025/2504_05xxx/2504.05979/images/89139fed496336187890615f822656f053268a0928b6b8aad1304fa164575873.jpg b/data/2025/2504_05xxx/2504.05979/images/89139fed496336187890615f822656f053268a0928b6b8aad1304fa164575873.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f1f935eec4248811e97d5724671c7fa6cc4ed9f2
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/89139fed496336187890615f822656f053268a0928b6b8aad1304fa164575873.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:991e1c2691b57a9962e7d53ae965e8a98d1c4d8895c9ad5bad2a8b787f7a06c7
+size 14366
diff --git a/data/2025/2504_05xxx/2504.05979/images/89185a1d1d4373656d69efacb10271368adb3f6ab4aa29bb7ce10ccd11ba70a6.jpg b/data/2025/2504_05xxx/2504.05979/images/89185a1d1d4373656d69efacb10271368adb3f6ab4aa29bb7ce10ccd11ba70a6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2d054257a2c329ed430e088a3c158bce2f1ad768
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/89185a1d1d4373656d69efacb10271368adb3f6ab4aa29bb7ce10ccd11ba70a6.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20014dbdb7908371e955589b3ab0d3c83df70d65c792ba519a1c8c79f60731c1
+size 7946
diff --git a/data/2025/2504_05xxx/2504.05979/images/896b8444337aa0e1e16c6afd486c4b4f5296517582f2b9dbf6bc16114d53d2ab.jpg b/data/2025/2504_05xxx/2504.05979/images/896b8444337aa0e1e16c6afd486c4b4f5296517582f2b9dbf6bc16114d53d2ab.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..85642f19ed616056d474b58291ff19408190d587
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/896b8444337aa0e1e16c6afd486c4b4f5296517582f2b9dbf6bc16114d53d2ab.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c42ea97d6f966b661ed721b00558ae5b0aba498ccd067e94d5609b570570919f
+size 5376
diff --git a/data/2025/2504_05xxx/2504.05979/images/898c8f34d0da327db20aba6653163e933c3b881344fb81df6b0f416dff0a65df.jpg b/data/2025/2504_05xxx/2504.05979/images/898c8f34d0da327db20aba6653163e933c3b881344fb81df6b0f416dff0a65df.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2fa927223a9e2007141679857196da2d494eeac5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/898c8f34d0da327db20aba6653163e933c3b881344fb81df6b0f416dff0a65df.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9174d460916f66e07468d95cbfe6da6471720416c5f3a7f7109b05871e7e0bb
+size 16328
diff --git a/data/2025/2504_05xxx/2504.05979/images/89990c180c4a599a243294b0904684d0351ed3529a4375d02a3ffb1ea5cc5012.jpg b/data/2025/2504_05xxx/2504.05979/images/89990c180c4a599a243294b0904684d0351ed3529a4375d02a3ffb1ea5cc5012.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8260d86297c27f380bd9354be16dd864621fcaa2
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/89990c180c4a599a243294b0904684d0351ed3529a4375d02a3ffb1ea5cc5012.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:313fefb18327c1419e7e1ea333741c171f9cd4d8a1931819ecaf7f1cecb5a883
+size 9578
diff --git a/data/2025/2504_05xxx/2504.05979/images/899da298140f306785ebb56f86b92a0e2dfa83b94136c6477ed20cbea1efa3fe.jpg b/data/2025/2504_05xxx/2504.05979/images/899da298140f306785ebb56f86b92a0e2dfa83b94136c6477ed20cbea1efa3fe.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7ff89ad80b2760069070fa9622193feb1ec38111
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/899da298140f306785ebb56f86b92a0e2dfa83b94136c6477ed20cbea1efa3fe.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39793522aa9d8f72a9db2e3eda01df0447837b0b189846b07212fd392990bbdc
+size 7645
diff --git a/data/2025/2504_05xxx/2504.05979/images/899f06b2c3cf50b2d39323fff3222f827df1c1eb152fa1ed8c87460c8158613c.jpg b/data/2025/2504_05xxx/2504.05979/images/899f06b2c3cf50b2d39323fff3222f827df1c1eb152fa1ed8c87460c8158613c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bf02359dc08031d477471f2ec716404b175fb89f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/899f06b2c3cf50b2d39323fff3222f827df1c1eb152fa1ed8c87460c8158613c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6bf3e889a93111a2f0691e25d7986826d94c260134a7e117e301f51c1dd289e6
+size 8383
diff --git a/data/2025/2504_05xxx/2504.05979/images/89c711b51bd739f9ec7fcc44d62889f885c5ec915e0fc0cda6d670f5ca049e58.jpg b/data/2025/2504_05xxx/2504.05979/images/89c711b51bd739f9ec7fcc44d62889f885c5ec915e0fc0cda6d670f5ca049e58.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3fbe823e68590895d2f8304f526164533cbd36a4
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/89c711b51bd739f9ec7fcc44d62889f885c5ec915e0fc0cda6d670f5ca049e58.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0c6f7e3d824845101fee61f55b5fe49a56fb8287cdf69c8be57d2e1999b83ef
+size 11764
diff --git a/data/2025/2504_05xxx/2504.05979/images/89e41678c8fcc68e299a5cf2931d67bb1f2339142bca0c32c1593b4888650519.jpg b/data/2025/2504_05xxx/2504.05979/images/89e41678c8fcc68e299a5cf2931d67bb1f2339142bca0c32c1593b4888650519.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..10bdc4d29b5640159ce2f34f68ac9d9d59957085
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/89e41678c8fcc68e299a5cf2931d67bb1f2339142bca0c32c1593b4888650519.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93e403373f674f5495f65410f9a98128a17b25ff0b486425e297e037be8de230
+size 17600
diff --git a/data/2025/2504_05xxx/2504.05979/images/89edb9b677a275a49f6618e18a5f5744c28971461e4544202201e438f92139f3.jpg b/data/2025/2504_05xxx/2504.05979/images/89edb9b677a275a49f6618e18a5f5744c28971461e4544202201e438f92139f3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8fe3ce20c0a67cd690bedce2b099f52522869faa
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/89edb9b677a275a49f6618e18a5f5744c28971461e4544202201e438f92139f3.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ef65b6ae0f8def52b17ef30e2200c8f2b2234a0b5b12d5c98f48cc48204288e
+size 13690
diff --git a/data/2025/2504_05xxx/2504.05979/images/8a361a39ea1256599aa45e581bc05adce2bd4ba199154e81ac85761e04a5d305.jpg b/data/2025/2504_05xxx/2504.05979/images/8a361a39ea1256599aa45e581bc05adce2bd4ba199154e81ac85761e04a5d305.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..83bbc9fbfb9d1094f21ae4d07b129ece5e2183a1
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8a361a39ea1256599aa45e581bc05adce2bd4ba199154e81ac85761e04a5d305.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79feb7e1d6b3e0e58f718add975982faaf538b34ff272d1d2b407df6ebf1df28
+size 11947
diff --git a/data/2025/2504_05xxx/2504.05979/images/8acbf5757555da90dab4610d0040e9baac69b0e9e18c478fba61f00f601266a2.jpg b/data/2025/2504_05xxx/2504.05979/images/8acbf5757555da90dab4610d0040e9baac69b0e9e18c478fba61f00f601266a2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6b9a64e95357cf57a06b9301f4c39cdce25c67f3
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8acbf5757555da90dab4610d0040e9baac69b0e9e18c478fba61f00f601266a2.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac27c5026cd253ae62fe46e387267610bbcdd09c7fb96ec58e219e0c6a32f375
+size 9909
diff --git a/data/2025/2504_05xxx/2504.05979/images/8aee6376931ccae47da7774dcc80f87cb4655a39088d2495592ef2b55cc6e8ce.jpg b/data/2025/2504_05xxx/2504.05979/images/8aee6376931ccae47da7774dcc80f87cb4655a39088d2495592ef2b55cc6e8ce.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..139737ce0e4d74d68f37a208c383b2783d3720a5
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8aee6376931ccae47da7774dcc80f87cb4655a39088d2495592ef2b55cc6e8ce.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22e98e7e88fc2e3001e57e33d29a18ea02eea2554b648d96b88d97e3904338b0
+size 7579
diff --git a/data/2025/2504_05xxx/2504.05979/images/8b47755995c00dd9ee49f071a5906d93f0c148923ccaa69fb15f0c73140fc71c.jpg b/data/2025/2504_05xxx/2504.05979/images/8b47755995c00dd9ee49f071a5906d93f0c148923ccaa69fb15f0c73140fc71c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ecc80c15777e0d8c8ffdd425957a6c0eb630aa7b
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8b47755995c00dd9ee49f071a5906d93f0c148923ccaa69fb15f0c73140fc71c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:195d1422aef55bbc8d9e92b98ef3826844a19c9ccf2656a6bd76914b94ec76e4
+size 904
diff --git a/data/2025/2504_05xxx/2504.05979/images/8ba638159d6af8b00306bfa770db1930683a70fb3c84b43764507be8b7b6f124.jpg b/data/2025/2504_05xxx/2504.05979/images/8ba638159d6af8b00306bfa770db1930683a70fb3c84b43764507be8b7b6f124.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3ef033cea5ea9f7eadedd20930b2a5b1aa8ccbbc
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8ba638159d6af8b00306bfa770db1930683a70fb3c84b43764507be8b7b6f124.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b9f75a8566c46625de44318cac5febac22a4c0684281caf74132fb35b52529d
+size 8171
diff --git a/data/2025/2504_05xxx/2504.05979/images/8d12c4277ccc433463d916e0d4c021703de904c087034505189b0c4f6bee4dc4.jpg b/data/2025/2504_05xxx/2504.05979/images/8d12c4277ccc433463d916e0d4c021703de904c087034505189b0c4f6bee4dc4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ebfd10635180c0a26a0acf6e4cf7e946099a8946
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8d12c4277ccc433463d916e0d4c021703de904c087034505189b0c4f6bee4dc4.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f097c2e7c4691745ffec4361a6a7a145ccf8c2436cba59ad774110d67b4b20e
+size 13890
diff --git a/data/2025/2504_05xxx/2504.05979/images/8d55ee618dd53e3a5e154598b93d67afc79a4d7bf139ce03e31d740af22b29f0.jpg b/data/2025/2504_05xxx/2504.05979/images/8d55ee618dd53e3a5e154598b93d67afc79a4d7bf139ce03e31d740af22b29f0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3d0cac81b456dac3932171fae637af632e0a4ffd
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8d55ee618dd53e3a5e154598b93d67afc79a4d7bf139ce03e31d740af22b29f0.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91deb72a5919e962617c0137bcc8cf127ce4d3fbfa68479e8457fcde5b7e6dc7
+size 10718
diff --git a/data/2025/2504_05xxx/2504.05979/images/8d843e8b140fd3c6f219b02b54f981e68e8501565fcef5774b45f8e54c5de112.jpg b/data/2025/2504_05xxx/2504.05979/images/8d843e8b140fd3c6f219b02b54f981e68e8501565fcef5774b45f8e54c5de112.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ec9e4eeccd9208ed0b7f53875a341f17df653c3b
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8d843e8b140fd3c6f219b02b54f981e68e8501565fcef5774b45f8e54c5de112.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fe935c6142587f2512fce0e2f85f045e0e8fba4797a135f4bbbd62bc422cfe5
+size 7629
diff --git a/data/2025/2504_05xxx/2504.05979/images/8da79e08847cb47bf66249a4079de84e4d65ff7744d90920fa8a900bbdbaed53.jpg b/data/2025/2504_05xxx/2504.05979/images/8da79e08847cb47bf66249a4079de84e4d65ff7744d90920fa8a900bbdbaed53.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5ba3a13d7d9f71ee24fd69ef8d14a4efd880b9a1
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8da79e08847cb47bf66249a4079de84e4d65ff7744d90920fa8a900bbdbaed53.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a8efc49c92db018726208430dceadb2e7f293d4f5374bfef2d6be80313cb357
+size 11047
diff --git a/data/2025/2504_05xxx/2504.05979/images/8de34136e3817eb2b912dec2f52ae54240a4d61a51e8f5d08695f60f906cb9b3.jpg b/data/2025/2504_05xxx/2504.05979/images/8de34136e3817eb2b912dec2f52ae54240a4d61a51e8f5d08695f60f906cb9b3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..36bec47f6883dd141636497bbb73350d1b3872ca
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8de34136e3817eb2b912dec2f52ae54240a4d61a51e8f5d08695f60f906cb9b3.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3572681fafa19afde5c4395ca97f1ab2b8adc21847b9618f726bca46d18e774d
+size 13279
diff --git a/data/2025/2504_05xxx/2504.05979/images/8e5d8cc638f17208ad4fb443ff0683574d425e64d854400f82678e0227eca37e.jpg b/data/2025/2504_05xxx/2504.05979/images/8e5d8cc638f17208ad4fb443ff0683574d425e64d854400f82678e0227eca37e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c947d233666dd6fc6e01a148870a1733998570d9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8e5d8cc638f17208ad4fb443ff0683574d425e64d854400f82678e0227eca37e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c53e36eabd3d224c85c1f9f12d2af1a48acf3a28b35b45ee67cda68b8282d6ff
+size 9547
diff --git a/data/2025/2504_05xxx/2504.05979/images/8ef5824e83d6803a07b614317f4d908c06c18331051e08b8eef6e454c1be8073.jpg b/data/2025/2504_05xxx/2504.05979/images/8ef5824e83d6803a07b614317f4d908c06c18331051e08b8eef6e454c1be8073.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..302547e5c27c2b079610ea07415cf5a7483df04a
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8ef5824e83d6803a07b614317f4d908c06c18331051e08b8eef6e454c1be8073.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:048437d62a74dc4e7a0d0f8b9403295a1ae2024c76297e54ed8756e23f72c836
+size 13775
diff --git a/data/2025/2504_05xxx/2504.05979/images/8f1ec7ef5d643c0bfda5c6dcee55e55fbae700384aa8d8ff28e6f695f19abd73.jpg b/data/2025/2504_05xxx/2504.05979/images/8f1ec7ef5d643c0bfda5c6dcee55e55fbae700384aa8d8ff28e6f695f19abd73.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e5a4cc95737f7b6593a5ecb58c6d713e68188a56
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8f1ec7ef5d643c0bfda5c6dcee55e55fbae700384aa8d8ff28e6f695f19abd73.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20279c1db27d7ca0b13f32eaee95f66ee060630ee4087dbb97a8b6715f6f00ce
+size 8443
diff --git a/data/2025/2504_05xxx/2504.05979/images/8f284b6c611448bfc951157e9a8d4ed20c6128da83b6852e318506b931aab1de.jpg b/data/2025/2504_05xxx/2504.05979/images/8f284b6c611448bfc951157e9a8d4ed20c6128da83b6852e318506b931aab1de.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..588a870a7cae8ee19e9a82b6c4516ae8cc113e27
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8f284b6c611448bfc951157e9a8d4ed20c6128da83b6852e318506b931aab1de.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a69fbadf1495427519efa6b74d66fad192f1011f5164f1b58fdec44b0ae896f
+size 8415
diff --git a/data/2025/2504_05xxx/2504.05979/images/8f405db10bccda82cc90e5b0887cf8f372d62c3308cf2e2f187d9b3c87f37919.jpg b/data/2025/2504_05xxx/2504.05979/images/8f405db10bccda82cc90e5b0887cf8f372d62c3308cf2e2f187d9b3c87f37919.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b86ace0f547edc6571c094171a5feed0edd26f33
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8f405db10bccda82cc90e5b0887cf8f372d62c3308cf2e2f187d9b3c87f37919.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9a61fa5c0c7f77bd9e574164a5d8fc9088fd7861c829790508edd358dcfc444
+size 10375
diff --git a/data/2025/2504_05xxx/2504.05979/images/8f533a46531ffe4ae8d62f47eb7fc7615dbfab620a667fb4d1593863f2cb286f.jpg b/data/2025/2504_05xxx/2504.05979/images/8f533a46531ffe4ae8d62f47eb7fc7615dbfab620a667fb4d1593863f2cb286f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..369bdb5e88ee458e251d721879e69907b28b3366
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8f533a46531ffe4ae8d62f47eb7fc7615dbfab620a667fb4d1593863f2cb286f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2526a70796c0d3853afee2424e1879732fb8de4b97ba1173871a89e86632213
+size 12063
diff --git a/data/2025/2504_05xxx/2504.05979/images/8f600bc759b57a060077f8d020e12edc5a0d30fdd50d923a4ba69ef3ab7e3b8a.jpg b/data/2025/2504_05xxx/2504.05979/images/8f600bc759b57a060077f8d020e12edc5a0d30fdd50d923a4ba69ef3ab7e3b8a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4ca284cd6c2ebdeb6db9a105c0f89cddf1e65805
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8f600bc759b57a060077f8d020e12edc5a0d30fdd50d923a4ba69ef3ab7e3b8a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be2bf83a6794c615c00b29a7e243193ee382db057b24dfc7a04128874e5366df
+size 11526
diff --git a/data/2025/2504_05xxx/2504.05979/images/8fc6d53c4cd8581616940efb6ac02413d37aece5c9462fc0de359572bfb0d570.jpg b/data/2025/2504_05xxx/2504.05979/images/8fc6d53c4cd8581616940efb6ac02413d37aece5c9462fc0de359572bfb0d570.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ded30d7c730f004bb4a8a1de00eda1eb6ab0f087
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8fc6d53c4cd8581616940efb6ac02413d37aece5c9462fc0de359572bfb0d570.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:025e234289553c61efc04d8690feeac3b3f66747ccb19f937b9b7a0e251945f8
+size 13877
diff --git a/data/2025/2504_05xxx/2504.05979/images/8fcfecbfee8e3af8ee12f1909d0f8d6442457efaad84be98a5ae90af97d909d1.jpg b/data/2025/2504_05xxx/2504.05979/images/8fcfecbfee8e3af8ee12f1909d0f8d6442457efaad84be98a5ae90af97d909d1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1dc2a55418a503932ec6ae9f3f6daa66894ab047
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/8fcfecbfee8e3af8ee12f1909d0f8d6442457efaad84be98a5ae90af97d909d1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfab9c92fafb580020cf4e71f0bf43a774227a32a939e7b02a8e75821f254054
+size 12868
diff --git a/data/2025/2504_05xxx/2504.05979/images/900798659953d6bf5da0b4a07b8d732ff649b11f079acb0d0b7f9e3317082e6e.jpg b/data/2025/2504_05xxx/2504.05979/images/900798659953d6bf5da0b4a07b8d732ff649b11f079acb0d0b7f9e3317082e6e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6ea69ae7b6d7797f95b34afd4ceb72c403878c26
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/900798659953d6bf5da0b4a07b8d732ff649b11f079acb0d0b7f9e3317082e6e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0710d85a356a1fd40f40b34876d2a78db0cd2d154238e0531a8926ea110159d
+size 14251
diff --git a/data/2025/2504_05xxx/2504.05979/images/90148daf477655ba5438aee9571917bb7d337f466c861465128cb7f0a6616246.jpg b/data/2025/2504_05xxx/2504.05979/images/90148daf477655ba5438aee9571917bb7d337f466c861465128cb7f0a6616246.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a18ae9131443dda63ea082ce0b5c2e07472b7b20
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/90148daf477655ba5438aee9571917bb7d337f466c861465128cb7f0a6616246.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:105c8fa055cc5a60a3ff0b80e8ab05565f83c735d111fcef294a2066c9da29c7
+size 6952
diff --git a/data/2025/2504_05xxx/2504.05979/images/902fc2f31952ca02a07900b777fb797fe5708419dadaf611ebdbe7c0305895cd.jpg b/data/2025/2504_05xxx/2504.05979/images/902fc2f31952ca02a07900b777fb797fe5708419dadaf611ebdbe7c0305895cd.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5a172b19abc29a7bc46cbe28d171269de03f4343
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/902fc2f31952ca02a07900b777fb797fe5708419dadaf611ebdbe7c0305895cd.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03b48a8f68428e9791aa4824ef2577835b4ce749bef3b98888440b4b99963638
+size 8668
diff --git a/data/2025/2504_05xxx/2504.05979/images/9077e20f899c60d5e1c26e1748dfa141980bf09da6c613467d42c1143bab34a1.jpg b/data/2025/2504_05xxx/2504.05979/images/9077e20f899c60d5e1c26e1748dfa141980bf09da6c613467d42c1143bab34a1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e8a844e64b2ad14aa3dfbd181acd2a1535769961
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9077e20f899c60d5e1c26e1748dfa141980bf09da6c613467d42c1143bab34a1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07cd08ea114982f369d1ac514d25ad002099dc9e1c27327ed820680fdbf1b478
+size 22337
diff --git a/data/2025/2504_05xxx/2504.05979/images/91219e5a27a8f4d0eede45256f0b86e5561a80dee1a5c603cab39235011f504d.jpg b/data/2025/2504_05xxx/2504.05979/images/91219e5a27a8f4d0eede45256f0b86e5561a80dee1a5c603cab39235011f504d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e3ecc53ad52eae11bc840c14ee9d6b9b14a40788
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/91219e5a27a8f4d0eede45256f0b86e5561a80dee1a5c603cab39235011f504d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4694b4caff2841175c0c7610787bff869ae4aebecf6cf9c0768105eeab0b42cb
+size 4838
diff --git a/data/2025/2504_05xxx/2504.05979/images/9149d06a0cd845a92028c8f8199da8213692d3c85666f692dd350f78144af753.jpg b/data/2025/2504_05xxx/2504.05979/images/9149d06a0cd845a92028c8f8199da8213692d3c85666f692dd350f78144af753.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d854184c84aea8ae1c07c98bd304db20403336f4
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9149d06a0cd845a92028c8f8199da8213692d3c85666f692dd350f78144af753.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a77d5fcaeca6739209db18655d8bfa68602d4a063eb97b6a0b5c4c0d64719eba
+size 12205
diff --git a/data/2025/2504_05xxx/2504.05979/images/915474b5475b9cfb5be03f5b44afa9a58d8047bd5530980a2d330a11b5d8294c.jpg b/data/2025/2504_05xxx/2504.05979/images/915474b5475b9cfb5be03f5b44afa9a58d8047bd5530980a2d330a11b5d8294c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..42fdc53451f13d05638674504cdba90add501901
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/915474b5475b9cfb5be03f5b44afa9a58d8047bd5530980a2d330a11b5d8294c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:491b404d71efde558537214c787499b9a5f4456e2eb657239b3a7cf88f242c84
+size 11762
diff --git a/data/2025/2504_05xxx/2504.05979/images/915695ce6be4cdda6f09d7deabc426642440f267759651fc6ec01c427928bd50.jpg b/data/2025/2504_05xxx/2504.05979/images/915695ce6be4cdda6f09d7deabc426642440f267759651fc6ec01c427928bd50.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c00b3824d98fabd61f3b5508fe7bf7b4afb47ee1
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/915695ce6be4cdda6f09d7deabc426642440f267759651fc6ec01c427928bd50.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c6eefc584786bfdc51abfb0048fe924e9449d0fee4f7608e81bf5157bc2bd76
+size 9122
diff --git a/data/2025/2504_05xxx/2504.05979/images/9164a12913f12d50e14960da007a5f198f424495fc1f7b5da62b254cd8bd48e1.jpg b/data/2025/2504_05xxx/2504.05979/images/9164a12913f12d50e14960da007a5f198f424495fc1f7b5da62b254cd8bd48e1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ec23e0b10758b610ca4ba98e6a3ffbbad8865215
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9164a12913f12d50e14960da007a5f198f424495fc1f7b5da62b254cd8bd48e1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e52b33d336ea14d58a5e7f3cea3dda0e193a7793143c90fe7aaaeee8c45b4d93
+size 15361
diff --git a/data/2025/2504_05xxx/2504.05979/images/91b32ed2b4041faded4ca926aa6a533ba22ec3c950274f75ac5ae3556ef420f0.jpg b/data/2025/2504_05xxx/2504.05979/images/91b32ed2b4041faded4ca926aa6a533ba22ec3c950274f75ac5ae3556ef420f0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4ce7aebe06bbeaaf9a46eef9463d3ef39ba71a82
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/91b32ed2b4041faded4ca926aa6a533ba22ec3c950274f75ac5ae3556ef420f0.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21b9fe787e0f5a56d26594a051b773a76c248c44628902e39028314f752b39f9
+size 8813
diff --git a/data/2025/2504_05xxx/2504.05979/images/91df94d12a7739436d063cd8168ea4e4d26c88f28f02f9407f230adabe8d29e6.jpg b/data/2025/2504_05xxx/2504.05979/images/91df94d12a7739436d063cd8168ea4e4d26c88f28f02f9407f230adabe8d29e6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3e01566dbb14e654145d3d9f79008f3498ed83f7
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/91df94d12a7739436d063cd8168ea4e4d26c88f28f02f9407f230adabe8d29e6.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfb944c496b6e3c7a79ad2bd4cbfb96834b88e8d3225b2d16199e0e5789190e0
+size 6737
diff --git a/data/2025/2504_05xxx/2504.05979/images/92052199d2635356816ee2aba2aed1f8dca8d83b493fd47666fe5985bfd49a89.jpg b/data/2025/2504_05xxx/2504.05979/images/92052199d2635356816ee2aba2aed1f8dca8d83b493fd47666fe5985bfd49a89.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8ba5cfe2f6106275aca402f818d6b7a4feadfda4
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/92052199d2635356816ee2aba2aed1f8dca8d83b493fd47666fe5985bfd49a89.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05056402fd73df5de04216420b8513cc8aee4d745e8b8188d6e68ca995c00228
+size 12178
diff --git a/data/2025/2504_05xxx/2504.05979/images/92d34852cfd83335f95aa56035188712f19e68bf3f090085cf6440a634a2b66a.jpg b/data/2025/2504_05xxx/2504.05979/images/92d34852cfd83335f95aa56035188712f19e68bf3f090085cf6440a634a2b66a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..38940ff7413029b12cd547d57e4f5e4bc4c5704f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/92d34852cfd83335f95aa56035188712f19e68bf3f090085cf6440a634a2b66a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b32fd8532c855207465503c132ed21ba39ed6dea8e335c588b8ffc2df90431b7
+size 12035
diff --git a/data/2025/2504_05xxx/2504.05979/images/93239b7986d7e146b41b57dc317de85153c1f5d166ec11db11dfd7fe2702e17c.jpg b/data/2025/2504_05xxx/2504.05979/images/93239b7986d7e146b41b57dc317de85153c1f5d166ec11db11dfd7fe2702e17c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..751adddf293bb92ff0ccbbac966ff941575b205f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/93239b7986d7e146b41b57dc317de85153c1f5d166ec11db11dfd7fe2702e17c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c454105a0336e9ca0d299429289b07a733e6ee84a6a215fc8b78873355f104f
+size 14271
diff --git a/data/2025/2504_05xxx/2504.05979/images/9353609b07179915947a49bbe7cb97c69d989d42af40bfbeb65de7a2a7c68425.jpg b/data/2025/2504_05xxx/2504.05979/images/9353609b07179915947a49bbe7cb97c69d989d42af40bfbeb65de7a2a7c68425.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..26f874738822ca91208de48afa024a80b772e62f
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9353609b07179915947a49bbe7cb97c69d989d42af40bfbeb65de7a2a7c68425.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99299d24790b382229e5728725f56eb01695f11bcc1d4f8a34c556704ed6997b
+size 7475
diff --git a/data/2025/2504_05xxx/2504.05979/images/9410af00d870c0c0323ba5f3b9c30e57a1d3b4116a79dda9d22fbb7f6ef86c7f.jpg b/data/2025/2504_05xxx/2504.05979/images/9410af00d870c0c0323ba5f3b9c30e57a1d3b4116a79dda9d22fbb7f6ef86c7f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..db87cd1bf1d624f8440e400e5f8499fb57df4832
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9410af00d870c0c0323ba5f3b9c30e57a1d3b4116a79dda9d22fbb7f6ef86c7f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:747a7da8d1e016438e119e7e172e0e3740c60adf50ede565fa58e8c2bc7c27dc
+size 20903
diff --git a/data/2025/2504_05xxx/2504.05979/images/946427a8be6f45ad8290603f01d85b6ddddd4e891e723686e136a789ad2d67dc.jpg b/data/2025/2504_05xxx/2504.05979/images/946427a8be6f45ad8290603f01d85b6ddddd4e891e723686e136a789ad2d67dc.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..402358727eb4c0da63189b614415be0af8cbdb35
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/946427a8be6f45ad8290603f01d85b6ddddd4e891e723686e136a789ad2d67dc.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:459b135c9575e4fe46afc4be6427e376dc0a92e247b172c48a48b0bcc882116f
+size 5726
diff --git a/data/2025/2504_05xxx/2504.05979/images/946a58c00f3ff3a1895e1b765823d31d38f03bbcd704ed7183effd358ebae58a.jpg b/data/2025/2504_05xxx/2504.05979/images/946a58c00f3ff3a1895e1b765823d31d38f03bbcd704ed7183effd358ebae58a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b430d419e7ad534f5b4333c68b0aad77be018f24
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/946a58c00f3ff3a1895e1b765823d31d38f03bbcd704ed7183effd358ebae58a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b000bf5832c6748fd0bcd1eddd9910ebdccec26664fc396c14bcfc64cbc50d7
+size 16919
diff --git a/data/2025/2504_05xxx/2504.05979/images/94c4c44bd6d4d882958c8eed91e70b3ca5d10dea1f870d92a7661befb223ea99.jpg b/data/2025/2504_05xxx/2504.05979/images/94c4c44bd6d4d882958c8eed91e70b3ca5d10dea1f870d92a7661befb223ea99.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9973a078d8ad517c7db675f61150b04b654b1984
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/94c4c44bd6d4d882958c8eed91e70b3ca5d10dea1f870d92a7661befb223ea99.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c706d4a2a398246da00b6db6dfcb96813a560270c45f1354303bf6fb6ce1943
+size 11017
diff --git a/data/2025/2504_05xxx/2504.05979/images/94ec2cb93bf6619e7cff5d27b701fd16348dc2330513de806c10891f384b214e.jpg b/data/2025/2504_05xxx/2504.05979/images/94ec2cb93bf6619e7cff5d27b701fd16348dc2330513de806c10891f384b214e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a140143c1898db9bf9707b6b077f96723932956e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/94ec2cb93bf6619e7cff5d27b701fd16348dc2330513de806c10891f384b214e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0ce5d082472f38f395b5afb287a81478734608a63d5fc161147c5508425efec
+size 11919
diff --git a/data/2025/2504_05xxx/2504.05979/images/959c69e3cc69906dbb804b63c7b374843496df51ef6f1569b100b6937422b431.jpg b/data/2025/2504_05xxx/2504.05979/images/959c69e3cc69906dbb804b63c7b374843496df51ef6f1569b100b6937422b431.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cd08576f85fd3c5547cf8b48f80326b9efbe3210
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/959c69e3cc69906dbb804b63c7b374843496df51ef6f1569b100b6937422b431.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c9f2d7ca4b6d084093a79f2d0ac1fbee9708463d374cef6b0152283e36eb9d4
+size 7211
diff --git a/data/2025/2504_05xxx/2504.05979/images/9682ba919f23fd2efddf43910798451e8e7f75415d38ed0e818f82b242e57922.jpg b/data/2025/2504_05xxx/2504.05979/images/9682ba919f23fd2efddf43910798451e8e7f75415d38ed0e818f82b242e57922.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..743c5322ec13708ad6d9bd526fb113dfb86e38b3
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9682ba919f23fd2efddf43910798451e8e7f75415d38ed0e818f82b242e57922.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e7a5eb15497d2c59964c923506e27441884335d068e55d1806da6681a1ce493
+size 6853
diff --git a/data/2025/2504_05xxx/2504.05979/images/96954093e4eb35d0ca710e8119a031cdc99bc1453c9ec711f72072006f7ffbbb.jpg b/data/2025/2504_05xxx/2504.05979/images/96954093e4eb35d0ca710e8119a031cdc99bc1453c9ec711f72072006f7ffbbb.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..aee87dc644bdc3d25f630546b999bbdb4e81d9a8
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/96954093e4eb35d0ca710e8119a031cdc99bc1453c9ec711f72072006f7ffbbb.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebef43ecef6d9bf0d09662874bcd6c04c31790345eb7ce4a1976ce3f8954d0e6
+size 12953
diff --git a/data/2025/2504_05xxx/2504.05979/images/96c1a55c7c5f312a71c89d34f708b0e4f8da4556922ffdee41d673b76ea82dad.jpg b/data/2025/2504_05xxx/2504.05979/images/96c1a55c7c5f312a71c89d34f708b0e4f8da4556922ffdee41d673b76ea82dad.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9f735445b90205dd03c39fe3f7a4f903e05730cd
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/96c1a55c7c5f312a71c89d34f708b0e4f8da4556922ffdee41d673b76ea82dad.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ad42affd4df6a7fc93de1f6ed0ec8b6578fce8e3524b3acbf80e302c8081948
+size 20579
diff --git a/data/2025/2504_05xxx/2504.05979/images/96f8443e9e1d95f4d7b78542413a7daa8e652b264b9a0a1fb5f13e9ccdf11c2d.jpg b/data/2025/2504_05xxx/2504.05979/images/96f8443e9e1d95f4d7b78542413a7daa8e652b264b9a0a1fb5f13e9ccdf11c2d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f0ab2cc631267b402fcadb3ae85771a867fd900b
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/96f8443e9e1d95f4d7b78542413a7daa8e652b264b9a0a1fb5f13e9ccdf11c2d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46039b69aa877eadedd6788a17255c808ce06b02321a6a69defac6d56e2d86b2
+size 10008
diff --git a/data/2025/2504_05xxx/2504.05979/images/97cb4999fb7546f2acbbc518825a138f6e835d048cf927abe4812e6a75004c0d.jpg b/data/2025/2504_05xxx/2504.05979/images/97cb4999fb7546f2acbbc518825a138f6e835d048cf927abe4812e6a75004c0d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..43380869e5994ea91a32680b2782c2a7103e7cf7
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/97cb4999fb7546f2acbbc518825a138f6e835d048cf927abe4812e6a75004c0d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c66850494bb838dde887909017dee13f68dd711e4726ac9717215ede89c3742
+size 5501
diff --git a/data/2025/2504_05xxx/2504.05979/images/97eac775ab7fe9404225b9ec41d4b00866061d69df88ce9adc4b8d365ba02992.jpg b/data/2025/2504_05xxx/2504.05979/images/97eac775ab7fe9404225b9ec41d4b00866061d69df88ce9adc4b8d365ba02992.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..281967fea3331290effc05a62146ce8ec290e4c7
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/97eac775ab7fe9404225b9ec41d4b00866061d69df88ce9adc4b8d365ba02992.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:364375b6a1deb77525202f11b0088bfc74d9ca67410e74de17901fab54b3ed87
+size 13647
diff --git a/data/2025/2504_05xxx/2504.05979/images/9801dce9b0ee5070b386427554ed58b6a513f89be080d33cee85238e85084ce4.jpg b/data/2025/2504_05xxx/2504.05979/images/9801dce9b0ee5070b386427554ed58b6a513f89be080d33cee85238e85084ce4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..29b29621cce8c30fcab3ea3d3c1a618ab4c406c9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9801dce9b0ee5070b386427554ed58b6a513f89be080d33cee85238e85084ce4.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ec0b101b2e90bb43c4715a929c1708e2e873f8e9780b1cc5e8bdbd1872979cd
+size 9277
diff --git a/data/2025/2504_05xxx/2504.05979/images/98924e97a312fd055c311dfce4f2f33c194a08959c53493dda6596e9bbacc31e.jpg b/data/2025/2504_05xxx/2504.05979/images/98924e97a312fd055c311dfce4f2f33c194a08959c53493dda6596e9bbacc31e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..80b0e6cebf37d61c48a61e7d94c0a02ba54fa524
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/98924e97a312fd055c311dfce4f2f33c194a08959c53493dda6596e9bbacc31e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f12caffb718598a01aaf8b3dde0e782d296e9a355bac3909912aad01cc32fe09
+size 10890
diff --git a/data/2025/2504_05xxx/2504.05979/images/98a34d23216e8ad84b654750599c6cf856ae93a257ecd204feb4347ff25f2411.jpg b/data/2025/2504_05xxx/2504.05979/images/98a34d23216e8ad84b654750599c6cf856ae93a257ecd204feb4347ff25f2411.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c53d43acf18e2299bc824a4fd697d56ed39193a4
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/98a34d23216e8ad84b654750599c6cf856ae93a257ecd204feb4347ff25f2411.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:442235d951c921d8422f7047a5b360f676119c6d37d5165a5947a3fd7c41cb0e
+size 9283
diff --git a/data/2025/2504_05xxx/2504.05979/images/98fe6ae3af0e25ba30cef5b80dbf80b30b9cf636e59a4dadffc0a63864c71cdd.jpg b/data/2025/2504_05xxx/2504.05979/images/98fe6ae3af0e25ba30cef5b80dbf80b30b9cf636e59a4dadffc0a63864c71cdd.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e00a99b43c06811f59a765ebbfb4da0890181274
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/98fe6ae3af0e25ba30cef5b80dbf80b30b9cf636e59a4dadffc0a63864c71cdd.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3268c2a5df3a3f072ccb9e89a1ef73a609a764e464029cdc52916e578772acf3
+size 8130
diff --git a/data/2025/2504_05xxx/2504.05979/images/9915d6c0aa3e038b5ec03ebeb8dcca7a1b7496741251e2af39361e947240d627.jpg b/data/2025/2504_05xxx/2504.05979/images/9915d6c0aa3e038b5ec03ebeb8dcca7a1b7496741251e2af39361e947240d627.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..32f8f689bf1d78c322a8f1e3a64a44c24c0d9102
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9915d6c0aa3e038b5ec03ebeb8dcca7a1b7496741251e2af39361e947240d627.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8a55dbec85206a6c765fba283575dbcd6c861d384ad323a9b2913f61d9578f9
+size 9372
diff --git a/data/2025/2504_05xxx/2504.05979/images/995ae0408307d51c1fe4b22c7386e33f12f5f2de57aab5e0c882854a62a49b4a.jpg b/data/2025/2504_05xxx/2504.05979/images/995ae0408307d51c1fe4b22c7386e33f12f5f2de57aab5e0c882854a62a49b4a.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..520921cf4f4aed65d6883fbeee1426fc1e1de7d8
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/995ae0408307d51c1fe4b22c7386e33f12f5f2de57aab5e0c882854a62a49b4a.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae3c40beea758615825717361cfeef9cfa8330a29597f046997760b736475a8f
+size 9209
diff --git a/data/2025/2504_05xxx/2504.05979/images/999497601ef88fdd1c39e3f4a28faa7e03a78e030514867e7db3e4a3be244eff.jpg b/data/2025/2504_05xxx/2504.05979/images/999497601ef88fdd1c39e3f4a28faa7e03a78e030514867e7db3e4a3be244eff.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2fede254efa14816b343e067f3605fd568d58633
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/999497601ef88fdd1c39e3f4a28faa7e03a78e030514867e7db3e4a3be244eff.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a93072644aa1651f2a5042a26d97c8f7ad34711b85eb2af51dffee1288e00120
+size 3717
diff --git a/data/2025/2504_05xxx/2504.05979/images/9a09eb88416d90950b00ecbf953c22c1a7c50188a8cd661fe6744dcc97906d76.jpg b/data/2025/2504_05xxx/2504.05979/images/9a09eb88416d90950b00ecbf953c22c1a7c50188a8cd661fe6744dcc97906d76.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bdd06afdfad47b92983c41300d8d768c02d4de17
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9a09eb88416d90950b00ecbf953c22c1a7c50188a8cd661fe6744dcc97906d76.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32a430f7e6203158130088c7568d4927b1931b7e90361a38b844fc15b1bd93d9
+size 13638
diff --git a/data/2025/2504_05xxx/2504.05979/images/9a87f98dd86139de0d38246d54b1d070591e2fdb526d3285409a473583f28ec6.jpg b/data/2025/2504_05xxx/2504.05979/images/9a87f98dd86139de0d38246d54b1d070591e2fdb526d3285409a473583f28ec6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..87bd0a57700ce67d2191983d579c3198333b409e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9a87f98dd86139de0d38246d54b1d070591e2fdb526d3285409a473583f28ec6.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d896a2b85a4004f23327f3894470344161b1efe95bac80602f3613f386b95df
+size 20406
diff --git a/data/2025/2504_05xxx/2504.05979/images/9a9ede0bf4fe178006bfdab612511041503aa542b158bd5a1cafebee66aa0d19.jpg b/data/2025/2504_05xxx/2504.05979/images/9a9ede0bf4fe178006bfdab612511041503aa542b158bd5a1cafebee66aa0d19.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..783a7e73d0013f73ec0f023a498ea4f5f61d2256
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9a9ede0bf4fe178006bfdab612511041503aa542b158bd5a1cafebee66aa0d19.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f1e3a8303c79eae5481190fd87886ba57ad8cb7709a3fab328b1ae32440c170
+size 6990
diff --git a/data/2025/2504_05xxx/2504.05979/images/9aa5032b866731f07d0e37da2dd1017f2e4122d3413aa5ce54be7e8880b33931.jpg b/data/2025/2504_05xxx/2504.05979/images/9aa5032b866731f07d0e37da2dd1017f2e4122d3413aa5ce54be7e8880b33931.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..60a749a20068fe4bda810a6e4d6f4d55a69595f7
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9aa5032b866731f07d0e37da2dd1017f2e4122d3413aa5ce54be7e8880b33931.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5120e64cd0734f1f878775dea9a4eb5420d7ebaafc8e48dd6cf5b60ac9e2167a
+size 10005
diff --git a/data/2025/2504_05xxx/2504.05979/images/9b0a74f7db3b0699ac94506c383fb22b1af1f2bd911d7fa4312ee9ca289612ef.jpg b/data/2025/2504_05xxx/2504.05979/images/9b0a74f7db3b0699ac94506c383fb22b1af1f2bd911d7fa4312ee9ca289612ef.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3414edf2266930f3f95a80abca0f5092549b7475
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9b0a74f7db3b0699ac94506c383fb22b1af1f2bd911d7fa4312ee9ca289612ef.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61b5d69da2a021136302781d853a6c1fbd0f4023b94e00191b2453b576d12ed0
+size 3468
diff --git a/data/2025/2504_05xxx/2504.05979/images/9b25e051c800568d87dbf568b975a694ac1831be64dac4fe52e936e4cbe78e4f.jpg b/data/2025/2504_05xxx/2504.05979/images/9b25e051c800568d87dbf568b975a694ac1831be64dac4fe52e936e4cbe78e4f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d37f5bfa3f98c723f92fa4675a5e4808978a9c68
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9b25e051c800568d87dbf568b975a694ac1831be64dac4fe52e936e4cbe78e4f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d5c832b3f928b4980463343f65cf1c40fd69d97ff3adf94f31b7e1dc7f31021
+size 8537
diff --git a/data/2025/2504_05xxx/2504.05979/images/9b26a02ec460020f269c9e957ef3f51615c8aec20faba332c7ad88bddff5fcea.jpg b/data/2025/2504_05xxx/2504.05979/images/9b26a02ec460020f269c9e957ef3f51615c8aec20faba332c7ad88bddff5fcea.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c0a7e3b1a063d1373cfc7f03835c06889036a0d6
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9b26a02ec460020f269c9e957ef3f51615c8aec20faba332c7ad88bddff5fcea.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6231999fd39a391aa6023183e0139c242b639c9bda4274591abf00aaca428b8
+size 21583
diff --git a/data/2025/2504_05xxx/2504.05979/images/9b39db21bfca83d6660c37f7265a41bd9d7830647b6fcbdaafa3cc14372c9c6e.jpg b/data/2025/2504_05xxx/2504.05979/images/9b39db21bfca83d6660c37f7265a41bd9d7830647b6fcbdaafa3cc14372c9c6e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f784308d7b0473d5cad2894fd0c907901ad49c43
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9b39db21bfca83d6660c37f7265a41bd9d7830647b6fcbdaafa3cc14372c9c6e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc427eb91edc357e088929b701aa1083371752113a5d113045e9fe9ea8d9aca9
+size 20919
diff --git a/data/2025/2504_05xxx/2504.05979/images/9b6616b3e3e491d5719d2c2596fedf5f8396a133bfd0edae1a8a64d343c4cd8c.jpg b/data/2025/2504_05xxx/2504.05979/images/9b6616b3e3e491d5719d2c2596fedf5f8396a133bfd0edae1a8a64d343c4cd8c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..95ae318b3c773201574394e2105205d8892ae8a2
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9b6616b3e3e491d5719d2c2596fedf5f8396a133bfd0edae1a8a64d343c4cd8c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7855400185d541abbfed714d136db1a1ac310a0e1f596df89d65bacd55d761b3
+size 10558
diff --git a/data/2025/2504_05xxx/2504.05979/images/9bb8f63af64ed04966c8891cf4cf6ec49bd44f0d1a11eca0e431b45d7918994e.jpg b/data/2025/2504_05xxx/2504.05979/images/9bb8f63af64ed04966c8891cf4cf6ec49bd44f0d1a11eca0e431b45d7918994e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..42414b49df253850c7c81c7cc00dd3f8c9357b0e
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9bb8f63af64ed04966c8891cf4cf6ec49bd44f0d1a11eca0e431b45d7918994e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b949b8ecde6274a010d7a15fce60eeb7126b59c40f715aa54c7c710443cec7cb
+size 7979
diff --git a/data/2025/2504_05xxx/2504.05979/images/9bba0dc289d8ec29ff37642b0bc4d84feafbf8e7b361e6e17186d7b033c6b5be.jpg b/data/2025/2504_05xxx/2504.05979/images/9bba0dc289d8ec29ff37642b0bc4d84feafbf8e7b361e6e17186d7b033c6b5be.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9159dbd988355deebfdeea043fa76e8b50e95258
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9bba0dc289d8ec29ff37642b0bc4d84feafbf8e7b361e6e17186d7b033c6b5be.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6372bd1542adcfd38d5d695bdbfb141f72474127eb9ae8d994b196b8e4c8085
+size 15969
diff --git a/data/2025/2504_05xxx/2504.05979/images/9bfe1fd24e221db430a1ef2c97a6b127a44b5d7b0c9d1975f8c8209268003e53.jpg b/data/2025/2504_05xxx/2504.05979/images/9bfe1fd24e221db430a1ef2c97a6b127a44b5d7b0c9d1975f8c8209268003e53.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..23285a107146fdb07954fe05e9158f5c3d7182b0
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9bfe1fd24e221db430a1ef2c97a6b127a44b5d7b0c9d1975f8c8209268003e53.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4da48bc683b475fba5cb6d9944d60ea47148bff0441a645b158822fd512f4875
+size 7621
diff --git a/data/2025/2504_05xxx/2504.05979/images/9c2265499ec9f6a0c6e61a8914311c8b94d20424cd7a53a126ce5f30f3f6bf33.jpg b/data/2025/2504_05xxx/2504.05979/images/9c2265499ec9f6a0c6e61a8914311c8b94d20424cd7a53a126ce5f30f3f6bf33.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..27ef9170ae5915dd40c9297b88bcb858e35b0d73
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9c2265499ec9f6a0c6e61a8914311c8b94d20424cd7a53a126ce5f30f3f6bf33.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc11c2dfcce272cf4ff53232b0c243ae4ae0500a03832cbaa7ff99dae0b3c554
+size 955
diff --git a/data/2025/2504_05xxx/2504.05979/images/9d2ccc2dc8ada5802d342f8533d6e9a320919119569352e82afb4bf3500698e0.jpg b/data/2025/2504_05xxx/2504.05979/images/9d2ccc2dc8ada5802d342f8533d6e9a320919119569352e82afb4bf3500698e0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a064af6ff7bc9c74e81f16b2e4108cb3bfd5ebe8
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9d2ccc2dc8ada5802d342f8533d6e9a320919119569352e82afb4bf3500698e0.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29a7248192fd93c31161e899c8be41377c1ceb44ac0365fd6ea45f6d3f2a898b
+size 7699
diff --git a/data/2025/2504_05xxx/2504.05979/images/9d509d18edc90347ae9f6dcacd7565656a476da05b9ef46fdf5918a71a428c43.jpg b/data/2025/2504_05xxx/2504.05979/images/9d509d18edc90347ae9f6dcacd7565656a476da05b9ef46fdf5918a71a428c43.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..54b6fcf0c5ea0e6b89c5451a9101679d21138ea9
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9d509d18edc90347ae9f6dcacd7565656a476da05b9ef46fdf5918a71a428c43.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e7ae441a72bc8d110493045fa1e58b26a8f98ec7cff19d0b7ee485073d2cc79
+size 6389
diff --git a/data/2025/2504_05xxx/2504.05979/images/9d7e40286ddeb28b93be6916d8101d062aa91f512e1c1ac6b0ccc889ae647631.jpg b/data/2025/2504_05xxx/2504.05979/images/9d7e40286ddeb28b93be6916d8101d062aa91f512e1c1ac6b0ccc889ae647631.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7ede78d6fc643393e18ae657e3b87bf1c8a94ec2
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9d7e40286ddeb28b93be6916d8101d062aa91f512e1c1ac6b0ccc889ae647631.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9aef847aa9a0257b9a2c0d4a700dbc1d7b9de300f43419ce550a9eb829b3fd5
+size 968
diff --git a/data/2025/2504_05xxx/2504.05979/images/9d85af9012642e27d9952f9795e008d868506f471396fa6b593f956b82ca6fd0.jpg b/data/2025/2504_05xxx/2504.05979/images/9d85af9012642e27d9952f9795e008d868506f471396fa6b593f956b82ca6fd0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0383e0d8e4f18a46087643a390be2e9aa1b91089
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9d85af9012642e27d9952f9795e008d868506f471396fa6b593f956b82ca6fd0.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e463298c52d7ad1d27e21a5b5ae83b54c372fcd6614b1acd78301488f7f88042
+size 12183
diff --git a/data/2025/2504_05xxx/2504.05979/images/9df5012deab1c75c8cc3c9b199f8d51adaf06952378b5293634b0a061a791b0f.jpg b/data/2025/2504_05xxx/2504.05979/images/9df5012deab1c75c8cc3c9b199f8d51adaf06952378b5293634b0a061a791b0f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9f1b12b2687fe35507315262876d392d46339596
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9df5012deab1c75c8cc3c9b199f8d51adaf06952378b5293634b0a061a791b0f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0c3d13b03bf9c18c77ca3181062b1652714e401003070ed3637d78e54dbe72f
+size 4734
diff --git a/data/2025/2504_05xxx/2504.05979/images/9dff828e49740629bf2ee9f75d69169e08193f97717d5fed56fe0f6c1a5bcc42.jpg b/data/2025/2504_05xxx/2504.05979/images/9dff828e49740629bf2ee9f75d69169e08193f97717d5fed56fe0f6c1a5bcc42.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..05697bc37cc88b8ebfe271d4f74482af0b5023fd
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9dff828e49740629bf2ee9f75d69169e08193f97717d5fed56fe0f6c1a5bcc42.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4bc41d71af86d735a004af300d1a9358dd6b3f0adbad6a7a0850886b6248721
+size 9589
diff --git a/data/2025/2504_05xxx/2504.05979/images/9e208291de9a6b6fb7d4acda0172d03e668194c7eb8b35eb83dd89d00607f9ab.jpg b/data/2025/2504_05xxx/2504.05979/images/9e208291de9a6b6fb7d4acda0172d03e668194c7eb8b35eb83dd89d00607f9ab.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6654485c1bdfa1ad255d6808caa1e4f62b0a4ddb
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9e208291de9a6b6fb7d4acda0172d03e668194c7eb8b35eb83dd89d00607f9ab.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d38bba6bf5e28e944fbf9dafa7b2b2d2305a761f9f175077bd73e505ba18236d
+size 11321
diff --git a/data/2025/2504_05xxx/2504.05979/images/9e5e6d93ce20c260e565400b12fea89bd8bc529187af868dd712070ab1c328f8.jpg b/data/2025/2504_05xxx/2504.05979/images/9e5e6d93ce20c260e565400b12fea89bd8bc529187af868dd712070ab1c328f8.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6b420e0f80545566ce14468ab2a0a80d2e18a586
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9e5e6d93ce20c260e565400b12fea89bd8bc529187af868dd712070ab1c328f8.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e240a2a6974472a3ec6613bf8b766431caba23f62e2242a8dd27e05300e3083
+size 8529
diff --git a/data/2025/2504_05xxx/2504.05979/images/9e7f0f5aebdeb1b02b10a56ee4521570fb0755150225325cdd221a08f72aeae6.jpg b/data/2025/2504_05xxx/2504.05979/images/9e7f0f5aebdeb1b02b10a56ee4521570fb0755150225325cdd221a08f72aeae6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fd420fb72d25ab6650b52421d7040aa0242d4028
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9e7f0f5aebdeb1b02b10a56ee4521570fb0755150225325cdd221a08f72aeae6.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2da92fa72f35d9e891049513b204c7100efe19b4bf07de69ef050945fff6175
+size 7354
diff --git a/data/2025/2504_05xxx/2504.05979/images/9e99b81570d109732b42b178967ae11e9ed7d965809fd7f2fd60d19ccbc7cd6d.jpg b/data/2025/2504_05xxx/2504.05979/images/9e99b81570d109732b42b178967ae11e9ed7d965809fd7f2fd60d19ccbc7cd6d.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1c7c197acf150d6f831449c4ba289907f15fe1fa
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9e99b81570d109732b42b178967ae11e9ed7d965809fd7f2fd60d19ccbc7cd6d.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dd927072bcc45f00b44d49a2c42ad6068034ca1943bf73201288b9f64490366
+size 6259
diff --git a/data/2025/2504_05xxx/2504.05979/images/9f7208b84dbf5365d061b28896f055f01299d12b30af1dee82093163431a17e5.jpg b/data/2025/2504_05xxx/2504.05979/images/9f7208b84dbf5365d061b28896f055f01299d12b30af1dee82093163431a17e5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..aa1574b2915a349334af4085d24c10bc3743780d
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9f7208b84dbf5365d061b28896f055f01299d12b30af1dee82093163431a17e5.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57193425815676e1b88ca822431594bd4492365e3fae5a94dbe769aa91814253
+size 981
diff --git a/data/2025/2504_05xxx/2504.05979/images/9f7537d877ef9e59c4b7d2865b5b20522610acea27d5295f2b7a540a7f158aa1.jpg b/data/2025/2504_05xxx/2504.05979/images/9f7537d877ef9e59c4b7d2865b5b20522610acea27d5295f2b7a540a7f158aa1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..94cbf00f38397cf10ba998af76c74f2738ab0afc
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9f7537d877ef9e59c4b7d2865b5b20522610acea27d5295f2b7a540a7f158aa1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15bcc656ef9cf608b728a639cd296abb5e4337b780b0fbbac371634a809dfe8f
+size 8758
diff --git a/data/2025/2504_05xxx/2504.05979/images/9f84d4d4f227df36ae2d8eb29101d274a1877c545a2468371a5088741bc3cd8f.jpg b/data/2025/2504_05xxx/2504.05979/images/9f84d4d4f227df36ae2d8eb29101d274a1877c545a2468371a5088741bc3cd8f.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ac80ae6248680db6fbfce3c5b840cf2ed14fd13c
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9f84d4d4f227df36ae2d8eb29101d274a1877c545a2468371a5088741bc3cd8f.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d07c586da303ea696e163fba9e3c673f2517d9b61b8ddd02db3956afae006040
+size 11453
diff --git a/data/2025/2504_05xxx/2504.05979/images/9fe5199286f0112238a7073ea1794d37f5662a85af0714537b15349dcc732e6c.jpg b/data/2025/2504_05xxx/2504.05979/images/9fe5199286f0112238a7073ea1794d37f5662a85af0714537b15349dcc732e6c.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d89cd4b6d53faf89e6b09c1f8a88d42e97867e9d
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9fe5199286f0112238a7073ea1794d37f5662a85af0714537b15349dcc732e6c.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5eb84f63bcf4773c83088fa20720b787b1a89a0e6cfcd512bf4d863075361fdb
+size 1034
diff --git a/data/2025/2504_05xxx/2504.05979/images/9ff188c0ba980c8e75a69ac4f116a1d53d641501e8a2976781fdf333d07f29ca.jpg b/data/2025/2504_05xxx/2504.05979/images/9ff188c0ba980c8e75a69ac4f116a1d53d641501e8a2976781fdf333d07f29ca.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5defec007ef326b88182c9c9215b47c69557e2c3
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/9ff188c0ba980c8e75a69ac4f116a1d53d641501e8a2976781fdf333d07f29ca.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8b05b46a922f183649bd4141ee3d9bbe8b86edc9d89f1a328f9ce9fabfc40f6
+size 10364
diff --git a/data/2025/2504_05xxx/2504.05979/images/a05ae86c57f255f29915d6f1ca7fbca481e453d5260a2ee80c1df5f1a84c31e1.jpg b/data/2025/2504_05xxx/2504.05979/images/a05ae86c57f255f29915d6f1ca7fbca481e453d5260a2ee80c1df5f1a84c31e1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4e38a54a46ddcf31bf505ffd67706e70bcafba87
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/a05ae86c57f255f29915d6f1ca7fbca481e453d5260a2ee80c1df5f1a84c31e1.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02231967481bf1bac4204c0c926f19047aef2ee17f3345e61d825d720b9f1a6a
+size 8366
diff --git a/data/2025/2504_05xxx/2504.05979/images/a093ca2444e0583bf58d16a259862c8f04e828919d40f21a081d04a7116fc357.jpg b/data/2025/2504_05xxx/2504.05979/images/a093ca2444e0583bf58d16a259862c8f04e828919d40f21a081d04a7116fc357.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..235bc298c06d072667e20aab8b7c4bb01e66310a
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/a093ca2444e0583bf58d16a259862c8f04e828919d40f21a081d04a7116fc357.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b38037bad488c8c827f9e864f6faa10e06442991b19d89233d39a014feafd9a
+size 8983
diff --git a/data/2025/2504_05xxx/2504.05979/images/a0be173027e50abb4eeaa3a09379cf5495e4baeb4d07b8cfe3c3fbfc7f49da66.jpg b/data/2025/2504_05xxx/2504.05979/images/a0be173027e50abb4eeaa3a09379cf5495e4baeb4d07b8cfe3c3fbfc7f49da66.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4235064ae10f0c153ed870a70c2816e86cd55814
--- /dev/null
+++ b/data/2025/2504_05xxx/2504.05979/images/a0be173027e50abb4eeaa3a09379cf5495e4baeb4d07b8cfe3c3fbfc7f49da66.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid
sha256:991e30cbdfa07c14c198d0cfe71445a1a8777b96f53ecfa4564e86c20095662a +size 8457 diff --git a/data/2025/2504_05xxx/2504.05979/images/a0db75620e3e1969b865f1e6ae4a41bb07983621337b59c87edc39c85287a2dc.jpg b/data/2025/2504_05xxx/2504.05979/images/a0db75620e3e1969b865f1e6ae4a41bb07983621337b59c87edc39c85287a2dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ed027057c36f2641a1f9910c25ca1ce9f15fb6b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a0db75620e3e1969b865f1e6ae4a41bb07983621337b59c87edc39c85287a2dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c45d00d167557e807c8118a74a8584f49e49bb2e00eeb57583a078edeeadbc7 +size 6376 diff --git a/data/2025/2504_05xxx/2504.05979/images/a0ebad62eca5fc477e0815751adacf1e46b3abcdce6c306101ecb96b7043a8c4.jpg b/data/2025/2504_05xxx/2504.05979/images/a0ebad62eca5fc477e0815751adacf1e46b3abcdce6c306101ecb96b7043a8c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b177671bd18ad9b29cad048ab8976c968d071e99 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a0ebad62eca5fc477e0815751adacf1e46b3abcdce6c306101ecb96b7043a8c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:338e395a3cdde5d12a0663481e8c25673b189482cedb6653e4eeac461a33e236 +size 7776 diff --git a/data/2025/2504_05xxx/2504.05979/images/a1117bdf2682b8d7115f12b7af60c126adce2d44e56036f3f9677ed0f6fa8cda.jpg b/data/2025/2504_05xxx/2504.05979/images/a1117bdf2682b8d7115f12b7af60c126adce2d44e56036f3f9677ed0f6fa8cda.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bffbed80198cc2b25795ecb424f21ed869cf03a5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a1117bdf2682b8d7115f12b7af60c126adce2d44e56036f3f9677ed0f6fa8cda.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21f903fc16ddfa74dc2c598ff7bbc09bee4e842a8ef9466e1bbed65269aad101 +size 3053 diff --git a/data/2025/2504_05xxx/2504.05979/images/a11e7e5a889c46ee27cc17d0f091d77cb1c167e6c4314649ed219089b6bfe94a.jpg b/data/2025/2504_05xxx/2504.05979/images/a11e7e5a889c46ee27cc17d0f091d77cb1c167e6c4314649ed219089b6bfe94a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..108ea472215d1c0a364e2be9c4e6e60c63b83e80 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a11e7e5a889c46ee27cc17d0f091d77cb1c167e6c4314649ed219089b6bfe94a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f65816a16119b29872babe6de9615b2ea14649336c33ad2b2476f6caee630e86 +size 12336 diff --git a/data/2025/2504_05xxx/2504.05979/images/a145180a5acb6bef4bb07f8a9b725b80a966f60fdc5991366b23f4cc7d100d3d.jpg b/data/2025/2504_05xxx/2504.05979/images/a145180a5acb6bef4bb07f8a9b725b80a966f60fdc5991366b23f4cc7d100d3d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a217b29dd477c9bf58e2ec745fa39c06bbaf31af --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a145180a5acb6bef4bb07f8a9b725b80a966f60fdc5991366b23f4cc7d100d3d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef560f6042e46e6cf3209a233cca19689f7f9fefca3b42f3ecfa629728e535aa +size 5287 diff --git a/data/2025/2504_05xxx/2504.05979/images/a179e4417d79d0b36fecc454a7c170f44da2165e686f30f95e1e62f5dacec7e9.jpg b/data/2025/2504_05xxx/2504.05979/images/a179e4417d79d0b36fecc454a7c170f44da2165e686f30f95e1e62f5dacec7e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..219c26027092165a76031fb3922e591127698ebb --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/a179e4417d79d0b36fecc454a7c170f44da2165e686f30f95e1e62f5dacec7e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:642fd9fc78b07f9592913da0964fa7ff64f6d2c5480d6e4d897863c0bdf9fd46 +size 12932 diff --git a/data/2025/2504_05xxx/2504.05979/images/a1cf0fd8a07f055d804221a00dac3ab6196ba65037b7980ae7d6ac1150ef8152.jpg b/data/2025/2504_05xxx/2504.05979/images/a1cf0fd8a07f055d804221a00dac3ab6196ba65037b7980ae7d6ac1150ef8152.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f43c2d33d80988b86823a9eebf19b9e9352ecfb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a1cf0fd8a07f055d804221a00dac3ab6196ba65037b7980ae7d6ac1150ef8152.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ac2b0eb778e65d40845e527170229902f546b3fd264be3e3cdc07e003ba221d +size 9284 diff --git a/data/2025/2504_05xxx/2504.05979/images/a1e959570f347e3fef4bcc4ccda36cec01a4c6a5cdd272c2a85fbb3cc2dea20d.jpg b/data/2025/2504_05xxx/2504.05979/images/a1e959570f347e3fef4bcc4ccda36cec01a4c6a5cdd272c2a85fbb3cc2dea20d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70e9e7bf07e0b2d74a907b19042038b7d2e0b2a5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a1e959570f347e3fef4bcc4ccda36cec01a4c6a5cdd272c2a85fbb3cc2dea20d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:423bbd57f4d19f70ca41c5b52943f1fad751ddc0760f86845cb4d8fe7fa920a0 +size 5645 diff --git a/data/2025/2504_05xxx/2504.05979/images/a24d3f139726d7234b378db7110157186e07c2e355a0860dbbefa990e5431c62.jpg b/data/2025/2504_05xxx/2504.05979/images/a24d3f139726d7234b378db7110157186e07c2e355a0860dbbefa990e5431c62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2162d2001cf0feb0228ec60b6eee2da6780383a7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a24d3f139726d7234b378db7110157186e07c2e355a0860dbbefa990e5431c62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95885ddf8d9755f7e4e8066c9f6469e00ef0504886c3e875b5ed5e0875f49e28 +size 10058 diff --git a/data/2025/2504_05xxx/2504.05979/images/a25788560611502b56750b8036243b4afafc5fde1438409b033768440c793002.jpg b/data/2025/2504_05xxx/2504.05979/images/a25788560611502b56750b8036243b4afafc5fde1438409b033768440c793002.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20d5dc7b8989b3da422dce11a7b980c6972ad9f4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a25788560611502b56750b8036243b4afafc5fde1438409b033768440c793002.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3091ee6191b4fec8dd1f9b0b8eb15db5183247245ecab13d55bf88b079e9436 +size 11315 diff --git a/data/2025/2504_05xxx/2504.05979/images/a26d98ea46dba3acd2775aa3134da02bf0e703e9b8a805247053fdfe1b12d8bc.jpg b/data/2025/2504_05xxx/2504.05979/images/a26d98ea46dba3acd2775aa3134da02bf0e703e9b8a805247053fdfe1b12d8bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..446c1b2aa4933fa222b8089e02c82c435fbeb1fb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a26d98ea46dba3acd2775aa3134da02bf0e703e9b8a805247053fdfe1b12d8bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2efc5c30218d764762a3d0fabd26154b236187ea51dbcb6e2701b820a4e3a618 +size 8127 diff --git a/data/2025/2504_05xxx/2504.05979/images/a34351a4399d5f36a740b505bcdf6d1791a25970e7e86a6c65bfafedbf3d931e.jpg 
b/data/2025/2504_05xxx/2504.05979/images/a34351a4399d5f36a740b505bcdf6d1791a25970e7e86a6c65bfafedbf3d931e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b0aec4cd45a8fc6692a986e4b6336bdc7f8cd1e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a34351a4399d5f36a740b505bcdf6d1791a25970e7e86a6c65bfafedbf3d931e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b972c6898259f2a126192c0db8d6d349ca4c61ed185e1a6dd854599d9c243f0e +size 6087 diff --git a/data/2025/2504_05xxx/2504.05979/images/a3692ab260876169e43b10f0a13b3e0a2358a469179695d6391b7121a291f656.jpg b/data/2025/2504_05xxx/2504.05979/images/a3692ab260876169e43b10f0a13b3e0a2358a469179695d6391b7121a291f656.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a11a9bd24127c77873947c45f7f2ad986f590354 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a3692ab260876169e43b10f0a13b3e0a2358a469179695d6391b7121a291f656.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:265652d4d3886ec8772d95efbb09f78a83f14f4448fb5d7f48db5cbc32b20ef4 +size 10079 diff --git a/data/2025/2504_05xxx/2504.05979/images/a3941e54ef0c8ca433e936496f30948b94b4e9f8ef2115b981275376d66cf6b7.jpg b/data/2025/2504_05xxx/2504.05979/images/a3941e54ef0c8ca433e936496f30948b94b4e9f8ef2115b981275376d66cf6b7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..252e36ed71f2f21fe66be0161fb42e4b493a658f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a3941e54ef0c8ca433e936496f30948b94b4e9f8ef2115b981275376d66cf6b7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27c97c4d3679183f5b8146cf27b2c559ddd98d20ece957eef7dc9158d74bcfe2 +size 8881 diff --git a/data/2025/2504_05xxx/2504.05979/images/a39507e46b3ffd1bbdf0ff1d20f603e166fb2c83a3cb4b81c304a49bc56d055e.jpg b/data/2025/2504_05xxx/2504.05979/images/a39507e46b3ffd1bbdf0ff1d20f603e166fb2c83a3cb4b81c304a49bc56d055e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8253ad09eb25d935cdaeb84e156b02afa17f7726 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a39507e46b3ffd1bbdf0ff1d20f603e166fb2c83a3cb4b81c304a49bc56d055e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a5a2bc45529769ce2904833bbc0f6ae5527224366437cc9ed93fe8ce1047bd0 +size 9354 diff --git a/data/2025/2504_05xxx/2504.05979/images/a43b10404face144826ade37cce23e7383b09b5f6616cd1ba4fb27d65847035f.jpg b/data/2025/2504_05xxx/2504.05979/images/a43b10404face144826ade37cce23e7383b09b5f6616cd1ba4fb27d65847035f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd27f83a0c4fe00ce346a6f057e3e3291376625c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a43b10404face144826ade37cce23e7383b09b5f6616cd1ba4fb27d65847035f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2764f4f7afcd1154db0cd0a8a6c83a5dfdce8e54eb2ab9dd0951326e6fd090a +size 5438 diff --git a/data/2025/2504_05xxx/2504.05979/images/a492c7a006b896e7436ce9df3705f82414a33538f91a0c7ba1f64667941a3dc1.jpg b/data/2025/2504_05xxx/2504.05979/images/a492c7a006b896e7436ce9df3705f82414a33538f91a0c7ba1f64667941a3dc1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b949dbad670f12745266e07132a37ef6f5d6ef11 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a492c7a006b896e7436ce9df3705f82414a33538f91a0c7ba1f64667941a3dc1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:90005a80574d10459ba1728d478a1721a4302a5313415ab62c4ce99575d02b4b +size 2695 diff --git a/data/2025/2504_05xxx/2504.05979/images/a4d89254c371d49daaab63ab7c798533a278ce2f2d6e10bca6da553223761a32.jpg b/data/2025/2504_05xxx/2504.05979/images/a4d89254c371d49daaab63ab7c798533a278ce2f2d6e10bca6da553223761a32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e976255aeb30b0d6b7d5b6ef24b71d00bb8aa2bf --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a4d89254c371d49daaab63ab7c798533a278ce2f2d6e10bca6da553223761a32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c039c4a97ce49ab0a6dcf620dfbaa20a4bf778a6cfbc413d40f3abe6302a0c20 +size 18587 diff --git a/data/2025/2504_05xxx/2504.05979/images/a52dd1eb6b418ed5ae687d895e95de05feea7dd01a9e70453d3ef0959530bc3a.jpg b/data/2025/2504_05xxx/2504.05979/images/a52dd1eb6b418ed5ae687d895e95de05feea7dd01a9e70453d3ef0959530bc3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a76fbabe45494c5a997cb51317608fb38cf92ad8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a52dd1eb6b418ed5ae687d895e95de05feea7dd01a9e70453d3ef0959530bc3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c448d96b2133f6aa017480612a02f30c84cfa0dd07580835a887d2a0c794d2d8 +size 16389 diff --git a/data/2025/2504_05xxx/2504.05979/images/a545885da447cbcefa593efa1163db12a6ecaa2cdbbf6e96a524f3a907d3724c.jpg b/data/2025/2504_05xxx/2504.05979/images/a545885da447cbcefa593efa1163db12a6ecaa2cdbbf6e96a524f3a907d3724c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89b6b8ed7339a37bb974b011f562fef7684eab85 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a545885da447cbcefa593efa1163db12a6ecaa2cdbbf6e96a524f3a907d3724c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b47fced936ebc1c7a08c866c2a0072f741805cf6a907e231233a7a9209daf67 +size 11947 diff --git a/data/2025/2504_05xxx/2504.05979/images/a5b89b647f49cafe186c5a2f230a7d551fedbe9a4b6ab523991f91c5baceb76f.jpg b/data/2025/2504_05xxx/2504.05979/images/a5b89b647f49cafe186c5a2f230a7d551fedbe9a4b6ab523991f91c5baceb76f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3990d151c10409478cc6c53abd5243f4d1337ec9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a5b89b647f49cafe186c5a2f230a7d551fedbe9a4b6ab523991f91c5baceb76f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99b17d3f249a94b5044b966d8e4a24d08249dc91c4ad9c3574d1af5f3536be34 +size 5862 diff --git a/data/2025/2504_05xxx/2504.05979/images/a5bc8e52d7842f4252e539ba34adcea369c6d11bb3eaa72ea23966ff1c26f6e2.jpg b/data/2025/2504_05xxx/2504.05979/images/a5bc8e52d7842f4252e539ba34adcea369c6d11bb3eaa72ea23966ff1c26f6e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c20865165ac3c78e125a3dafa3f5763012a08680 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a5bc8e52d7842f4252e539ba34adcea369c6d11bb3eaa72ea23966ff1c26f6e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ec0347bd2489d5b4db0b0877446fbb1f0a5cd4e3d070612355d294f29c0b506 +size 9077 diff --git a/data/2025/2504_05xxx/2504.05979/images/a5f0857371c912d73812959c5154aa5f588c9c53b0cac8d82d05e68ed4b24ba2.jpg b/data/2025/2504_05xxx/2504.05979/images/a5f0857371c912d73812959c5154aa5f588c9c53b0cac8d82d05e68ed4b24ba2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b92f8127a520277f27f7556309969940e7b94f0 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/a5f0857371c912d73812959c5154aa5f588c9c53b0cac8d82d05e68ed4b24ba2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0e462b2dd8005cf5a7d2ec8ce891ddabf064520eefe83e8ffeb74eeeb902e9f +size 902 diff --git a/data/2025/2504_05xxx/2504.05979/images/a6a106419b52434d8a55c2b5fb821d02c748ecc0597904cf146832238e26aadd.jpg b/data/2025/2504_05xxx/2504.05979/images/a6a106419b52434d8a55c2b5fb821d02c748ecc0597904cf146832238e26aadd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d570a811a5694d7810f17aea36ef3a5a25fac43 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a6a106419b52434d8a55c2b5fb821d02c748ecc0597904cf146832238e26aadd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae49bc0a26c5b8b8a9a92b5e615562bf84bb892fee28931cbcc82a11e8feb9e3 +size 8876 diff --git a/data/2025/2504_05xxx/2504.05979/images/a71db3675a1fc3707882b1ea7573faaee81a29d969a48f7917d231e5c96013a7.jpg b/data/2025/2504_05xxx/2504.05979/images/a71db3675a1fc3707882b1ea7573faaee81a29d969a48f7917d231e5c96013a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0de91517264b3c1fb717526f814893ccf4d1c3b7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a71db3675a1fc3707882b1ea7573faaee81a29d969a48f7917d231e5c96013a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dcd003052415927280720546306e47fc0e67f6085e27dfa5c8380bfb463f0ee +size 1011 diff --git a/data/2025/2504_05xxx/2504.05979/images/a84122365c616f0e518763e95e8748954778d1d6f37580aebe6d9fcdb6dbdefd.jpg b/data/2025/2504_05xxx/2504.05979/images/a84122365c616f0e518763e95e8748954778d1d6f37580aebe6d9fcdb6dbdefd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..438454d2b6e5ddb10f42172909f428296406b3bf --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a84122365c616f0e518763e95e8748954778d1d6f37580aebe6d9fcdb6dbdefd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1a72b12155cb04f9ee5e6ae2d488ef202d19b17200e45669401520241decb3e +size 5477 diff --git a/data/2025/2504_05xxx/2504.05979/images/a85c872e242b8a76e3a8010c80983519f0fb6a8346e50773ef2e7fab0b390d2a.jpg b/data/2025/2504_05xxx/2504.05979/images/a85c872e242b8a76e3a8010c80983519f0fb6a8346e50773ef2e7fab0b390d2a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79a16bc3d4244c867e358376864c047024a30281 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a85c872e242b8a76e3a8010c80983519f0fb6a8346e50773ef2e7fab0b390d2a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:768a4787f2aab55adbf1726ef43388a1d7ddfe06315240125b0903d9ce7b98d5 +size 11137 diff --git a/data/2025/2504_05xxx/2504.05979/images/a8a15f935780009ae4408165cc5d5abf2965efa5cead6b31539ab6f6570babf3.jpg b/data/2025/2504_05xxx/2504.05979/images/a8a15f935780009ae4408165cc5d5abf2965efa5cead6b31539ab6f6570babf3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68e88295792b037e3ef15e8743c78db1a1398bf5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a8a15f935780009ae4408165cc5d5abf2965efa5cead6b31539ab6f6570babf3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4663ec84fd03dc8f729fb64013ee03d8dcdf06e1567dc2ef44bc4e9e3408076a +size 17055 diff --git a/data/2025/2504_05xxx/2504.05979/images/a8a92b2a237654ea8a1e5c6eca6a9384ebacc2f436fc6dc6a34e87dd215b9d99.jpg 
b/data/2025/2504_05xxx/2504.05979/images/a8a92b2a237654ea8a1e5c6eca6a9384ebacc2f436fc6dc6a34e87dd215b9d99.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6517c15360a98e1fce5e18c289a7db9165210ec5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a8a92b2a237654ea8a1e5c6eca6a9384ebacc2f436fc6dc6a34e87dd215b9d99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:415a3a0130ca3b905837fbcddaeae4370b8bfa29f6696f7a52b4f8a47199850b +size 11951 diff --git a/data/2025/2504_05xxx/2504.05979/images/a8bf6e323c48a02f642dd0e8b8ab2bf45779e5f873173853590a980210b6b42b.jpg b/data/2025/2504_05xxx/2504.05979/images/a8bf6e323c48a02f642dd0e8b8ab2bf45779e5f873173853590a980210b6b42b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e6c5634a5dfc626b697884b34ac210361bcb41f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a8bf6e323c48a02f642dd0e8b8ab2bf45779e5f873173853590a980210b6b42b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb791ae8618b2e4da524293c72162f77e3d8d9e16198b2179a9cf9411759b33b +size 13203 diff --git a/data/2025/2504_05xxx/2504.05979/images/a92950a6ffd6c5949ed67dbcd8ef27f7a4455d1b77975ddb550332fdae784d04.jpg b/data/2025/2504_05xxx/2504.05979/images/a92950a6ffd6c5949ed67dbcd8ef27f7a4455d1b77975ddb550332fdae784d04.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1ec5f05dac708cc2f9d796a4207df117740a45f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a92950a6ffd6c5949ed67dbcd8ef27f7a4455d1b77975ddb550332fdae784d04.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c64c798de5b8a150f95545c3b6f299415e95fe605d93de0fcafe85c519811766 +size 944 diff --git a/data/2025/2504_05xxx/2504.05979/images/a99706fb05eb2a9239f7691575789b91486fcd20c7175b2b5dd9b1fbd088f850.jpg b/data/2025/2504_05xxx/2504.05979/images/a99706fb05eb2a9239f7691575789b91486fcd20c7175b2b5dd9b1fbd088f850.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1296a418583e479f09226bb71a55759babeec8c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/a99706fb05eb2a9239f7691575789b91486fcd20c7175b2b5dd9b1fbd088f850.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b876b4cb0958af2aba951bf8eb31e6a28fda6d7a9415e3330f142ab189509857 +size 5677 diff --git a/data/2025/2504_05xxx/2504.05979/images/aa0187c33d100a82b28f2505575ea9e419bbb4b364bc8feea7bf801423e56d3a.jpg b/data/2025/2504_05xxx/2504.05979/images/aa0187c33d100a82b28f2505575ea9e419bbb4b364bc8feea7bf801423e56d3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d145c8c123969aba47e767a56c1ba48860bb654 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/aa0187c33d100a82b28f2505575ea9e419bbb4b364bc8feea7bf801423e56d3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76c91520f1e38586bbec824f41f87c7c073ba782b0ccbc20d1926eb099a1c4de +size 12896 diff --git a/data/2025/2504_05xxx/2504.05979/images/aa32d16435cd0e8550916c3f311f8776216cf8e0260b722c1418d0d7619083ed.jpg b/data/2025/2504_05xxx/2504.05979/images/aa32d16435cd0e8550916c3f311f8776216cf8e0260b722c1418d0d7619083ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59ac47c015e04ca5c7d2485e91efc3d9d1667c95 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/aa32d16435cd0e8550916c3f311f8776216cf8e0260b722c1418d0d7619083ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3ccf757e697296c8726497e6a86f10a9e12273e292314d8ee39b58346eb675ad +size 2854 diff --git a/data/2025/2504_05xxx/2504.05979/images/aa5d8bd6655c3924c3860521fa7d3aabfae95679b8638674f96b892057031bdf.jpg b/data/2025/2504_05xxx/2504.05979/images/aa5d8bd6655c3924c3860521fa7d3aabfae95679b8638674f96b892057031bdf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..037deaf701ede3793c09dce78a5f432db91e8527 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/aa5d8bd6655c3924c3860521fa7d3aabfae95679b8638674f96b892057031bdf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d60d00471627aa7c094b078d26ee05d2fd23948d2dcb22c231271ecd01c39723 +size 9424 diff --git a/data/2025/2504_05xxx/2504.05979/images/aa66b3606a91f980129cef7b09a646b9b32aae5937a1a800da9a62b68a314796.jpg b/data/2025/2504_05xxx/2504.05979/images/aa66b3606a91f980129cef7b09a646b9b32aae5937a1a800da9a62b68a314796.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48c534858ebc239884425c7e2bbf9deea6f0e4bc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/aa66b3606a91f980129cef7b09a646b9b32aae5937a1a800da9a62b68a314796.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee583e21dd3051630ca056eada07a075d940f85124e4d7f06809198763229438 +size 11247 diff --git a/data/2025/2504_05xxx/2504.05979/images/aab60cd3ceb814eae695e39c17c9ede66cb9a44a4700601d9febe20289cdba35.jpg b/data/2025/2504_05xxx/2504.05979/images/aab60cd3ceb814eae695e39c17c9ede66cb9a44a4700601d9febe20289cdba35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ae0b2a15486ad0c3e95f5b832324d689cb72def --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/aab60cd3ceb814eae695e39c17c9ede66cb9a44a4700601d9febe20289cdba35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a01abcda4b6ddfd605a64efa2f814ccccb996c9e26a8f3b877c97b9a64aa41b0 +size 12451 diff --git a/data/2025/2504_05xxx/2504.05979/images/ab3420504662b7f8244c018e079e1cffd82ab918262a3443ff4a8a16e52e66fe.jpg b/data/2025/2504_05xxx/2504.05979/images/ab3420504662b7f8244c018e079e1cffd82ab918262a3443ff4a8a16e52e66fe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..287e2c4ef424c2d38e7b87348ee06544738a27ea --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ab3420504662b7f8244c018e079e1cffd82ab918262a3443ff4a8a16e52e66fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8c27ca5d0ddb7df6d23414ecf8e8cac9d793f59f67fb89e5ba2e72096c02588 +size 14074 diff --git a/data/2025/2504_05xxx/2504.05979/images/ab5d35fd29583376a884277394b92fa4599215c40d88cdcc94c6b9c9bd93f1ae.jpg b/data/2025/2504_05xxx/2504.05979/images/ab5d35fd29583376a884277394b92fa4599215c40d88cdcc94c6b9c9bd93f1ae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3875f2b82532ac26158536fa558d4c33e26b16b4 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ab5d35fd29583376a884277394b92fa4599215c40d88cdcc94c6b9c9bd93f1ae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f50a12b33dfc0063350efeec328064b3d60821ba5a1915e69d85af2db599c0f6 +size 7433 diff --git a/data/2025/2504_05xxx/2504.05979/images/ab685c4d565c19e94d0af9de2b15ce72d5e4d59e9fb5279055cc29a626f1a509.jpg b/data/2025/2504_05xxx/2504.05979/images/ab685c4d565c19e94d0af9de2b15ce72d5e4d59e9fb5279055cc29a626f1a509.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8bfd2d7bbfd2add71c34696d01031949b2d9177 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/ab685c4d565c19e94d0af9de2b15ce72d5e4d59e9fb5279055cc29a626f1a509.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:574c185653c043f1e6ce426764055f66a052f55e172e5334a6b239a6f42f7fe2 +size 9816 diff --git a/data/2025/2504_05xxx/2504.05979/images/ab89160a98b1c9923175245ecb1b778b6c8bedaa2a414c4e3a82178ef3554b45.jpg b/data/2025/2504_05xxx/2504.05979/images/ab89160a98b1c9923175245ecb1b778b6c8bedaa2a414c4e3a82178ef3554b45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..08de55538d80e76e4d78cbc0e7b74eb23bcc0a97 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ab89160a98b1c9923175245ecb1b778b6c8bedaa2a414c4e3a82178ef3554b45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b220e5ae91cb6326df70f7d173d6688c3eb652f5dddf1aff5f2f340c5afccfd +size 13391 diff --git a/data/2025/2504_05xxx/2504.05979/images/ab95d11bd4370f4489f59f00bf95b0420cf3629bf46e660886023f3f96118d89.jpg b/data/2025/2504_05xxx/2504.05979/images/ab95d11bd4370f4489f59f00bf95b0420cf3629bf46e660886023f3f96118d89.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e5379fc92a691a57e46e7f56c4d22971d2bf64d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ab95d11bd4370f4489f59f00bf95b0420cf3629bf46e660886023f3f96118d89.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aeb2caa97faf69fd4116903d5b58850cdaf8159ff50a8c0bcc0969347989c73e +size 13825 diff --git a/data/2025/2504_05xxx/2504.05979/images/ab999568dff4abd05c3fdb65bddbc6ea19103d123770203b58e1c0014cd42fd2.jpg b/data/2025/2504_05xxx/2504.05979/images/ab999568dff4abd05c3fdb65bddbc6ea19103d123770203b58e1c0014cd42fd2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aecb2e80b574ecd862015113c18d198b3c540295 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ab999568dff4abd05c3fdb65bddbc6ea19103d123770203b58e1c0014cd42fd2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d99eb3c0d0c731daea7a8c4287f405e6ca313d4499d06864c596a89c6182ff4a +size 12760 diff --git a/data/2025/2504_05xxx/2504.05979/images/aba75b8ad4c326892ca96755807604f259178d54af19a83236adab4115951198.jpg b/data/2025/2504_05xxx/2504.05979/images/aba75b8ad4c326892ca96755807604f259178d54af19a83236adab4115951198.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d694e3b4f5e256e0a783194af4450a972ecb15a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/aba75b8ad4c326892ca96755807604f259178d54af19a83236adab4115951198.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7018ffb2a8e3337b9ff0b08190b86244e357f27a9573597232eaed09b9c87c88 +size 2987 diff --git a/data/2025/2504_05xxx/2504.05979/images/abb40d377e6294ce85ec725e10870be0d09e757df45ef1ac17d63772ebe95887.jpg b/data/2025/2504_05xxx/2504.05979/images/abb40d377e6294ce85ec725e10870be0d09e757df45ef1ac17d63772ebe95887.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c27b58ab491565726dda9f77d8a416a84e2bfdcf --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/abb40d377e6294ce85ec725e10870be0d09e757df45ef1ac17d63772ebe95887.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8577b1c64bff24ca8aa315aebb4d2395804e7240dd788567a3a56468e6cd1528 +size 9147 diff --git a/data/2025/2504_05xxx/2504.05979/images/abb6c191cd8f3dbc58255f85b9eca1ff0ecbe31840d418b03345458fcd36ab20.jpg 
b/data/2025/2504_05xxx/2504.05979/images/abb6c191cd8f3dbc58255f85b9eca1ff0ecbe31840d418b03345458fcd36ab20.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e1d4bc573a7f7768c9cd393abb8d079302f13546 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/abb6c191cd8f3dbc58255f85b9eca1ff0ecbe31840d418b03345458fcd36ab20.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb6056047b1893770c9a89f68794fe258bd7cb671540f95e0cbde849ce2c209a +size 15936 diff --git a/data/2025/2504_05xxx/2504.05979/images/abf7f3e688bfa59a5ecc1db78f61491254ec021fedeea5c85187c6562809d61c.jpg b/data/2025/2504_05xxx/2504.05979/images/abf7f3e688bfa59a5ecc1db78f61491254ec021fedeea5c85187c6562809d61c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b9b286d299d491813a1c144bf531f3429efbb8e1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/abf7f3e688bfa59a5ecc1db78f61491254ec021fedeea5c85187c6562809d61c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af4f6689dced2482733056d656a930fa1797621a22bedd0ebdeea258496f13e9 +size 9544 diff --git a/data/2025/2504_05xxx/2504.05979/images/acc8a88501794d5cb5abb73802e25f714233bc33723c8874cf9c6c27f2a19f7d.jpg b/data/2025/2504_05xxx/2504.05979/images/acc8a88501794d5cb5abb73802e25f714233bc33723c8874cf9c6c27f2a19f7d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..07222b67bda2f668561def9b238f85e1500a53f8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/acc8a88501794d5cb5abb73802e25f714233bc33723c8874cf9c6c27f2a19f7d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3e1d56b55075e51f556cd0c8c7a08349560a645beff259d36af1831a39b01d5 +size 4671 diff --git a/data/2025/2504_05xxx/2504.05979/images/adcb856bb8a351cb62cc8d6fd9dcfe0f715a542653534f39e99b1973a7067ec8.jpg b/data/2025/2504_05xxx/2504.05979/images/adcb856bb8a351cb62cc8d6fd9dcfe0f715a542653534f39e99b1973a7067ec8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ea66558071f60c3440141ba9a483cb57078b8c2 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/adcb856bb8a351cb62cc8d6fd9dcfe0f715a542653534f39e99b1973a7067ec8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd099b736064cf9dc67f95370925a402a76be61a7ca01403938f0eb1afbd296b +size 15698 diff --git a/data/2025/2504_05xxx/2504.05979/images/ae6de595371f09d4b568904e9ac0c1c094ae221ff86659caaecc10d0524ac9df.jpg b/data/2025/2504_05xxx/2504.05979/images/ae6de595371f09d4b568904e9ac0c1c094ae221ff86659caaecc10d0524ac9df.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84a271d94bfdd5c1024ca405f7174986d4b52d6d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ae6de595371f09d4b568904e9ac0c1c094ae221ff86659caaecc10d0524ac9df.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3380d588a336131dfa3287f976ce62109d108802c46300f2879154db8df2293c +size 16845 diff --git a/data/2025/2504_05xxx/2504.05979/images/ae911cf1c44c913e1579fdc6ee25f6acd40fabf3b6bb39cb4fd1df2fd7c49995.jpg b/data/2025/2504_05xxx/2504.05979/images/ae911cf1c44c913e1579fdc6ee25f6acd40fabf3b6bb39cb4fd1df2fd7c49995.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c243ea9749f41740e2d34702b1168f746e86037f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ae911cf1c44c913e1579fdc6ee25f6acd40fabf3b6bb39cb4fd1df2fd7c49995.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b7c3c40d31b57427b8ac36597ce47cacf1a185d97f6f94ad3cfebe9044137c77 +size 7675 diff --git a/data/2025/2504_05xxx/2504.05979/images/af45afc3aa4a8340ccb4e22deeda2a924e0315faecedd77d56e667ae464a5ec2.jpg b/data/2025/2504_05xxx/2504.05979/images/af45afc3aa4a8340ccb4e22deeda2a924e0315faecedd77d56e667ae464a5ec2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f7c1e5d39dd659958ff3f71891dd24440cfcc13 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/af45afc3aa4a8340ccb4e22deeda2a924e0315faecedd77d56e667ae464a5ec2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdc9b07cbfe21b8df9f0f3eba707b2b1416c5bb688373534cf68293577ab13de +size 9998 diff --git a/data/2025/2504_05xxx/2504.05979/images/afb58fab9b72f3bf1b78d415b4d5e8ca817660113f2b9a1da7dee7153ca97330.jpg b/data/2025/2504_05xxx/2504.05979/images/afb58fab9b72f3bf1b78d415b4d5e8ca817660113f2b9a1da7dee7153ca97330.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6a7b18d055c6220e2836470dd7bf28572910fcb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/afb58fab9b72f3bf1b78d415b4d5e8ca817660113f2b9a1da7dee7153ca97330.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1e830bf8c41b4991000334e1e3ccdd5a2d4ee613ae290f88bd281e1b01b7aa1 +size 9500 diff --git a/data/2025/2504_05xxx/2504.05979/images/afce16d9cf439bcdc04fa94f8ca4f2227195b2cf36f175b299fa1b98664ddbfa.jpg b/data/2025/2504_05xxx/2504.05979/images/afce16d9cf439bcdc04fa94f8ca4f2227195b2cf36f175b299fa1b98664ddbfa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f5a297d98e23e5d4fe8019fe1a8f3b2467d79a0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/afce16d9cf439bcdc04fa94f8ca4f2227195b2cf36f175b299fa1b98664ddbfa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03ca36e22cef6fdaeb24315f3fb4cb398f4e165b7aaa1706fb8d2c2ea5f8cf00 +size 15629 diff --git a/data/2025/2504_05xxx/2504.05979/images/aff34b2fc3ec9bfe6b6d30adc9b2581aab5368e8efd4ea317e48bc4626796721.jpg b/data/2025/2504_05xxx/2504.05979/images/aff34b2fc3ec9bfe6b6d30adc9b2581aab5368e8efd4ea317e48bc4626796721.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d1d793c10e5ed910c83e637ebb3f1943337c94e7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/aff34b2fc3ec9bfe6b6d30adc9b2581aab5368e8efd4ea317e48bc4626796721.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baf3892f150cfa83dae6fe844cbda0bfc2d72005a0106811c33e64692e09c281 +size 9418 diff --git a/data/2025/2504_05xxx/2504.05979/images/b05322a8b0f75be84fd68e7685029fe222a8456412836c464982ecfc2b379f16.jpg b/data/2025/2504_05xxx/2504.05979/images/b05322a8b0f75be84fd68e7685029fe222a8456412836c464982ecfc2b379f16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b3bda173f642269bc3b0f435407fb7d18f247775 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b05322a8b0f75be84fd68e7685029fe222a8456412836c464982ecfc2b379f16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b5c898d5df22a5a28f1f494d96d4cd4dd960d007ac3f935d8d55b8b96338663 +size 10931 diff --git a/data/2025/2504_05xxx/2504.05979/images/b09f487669528fc2963ebabd0310ed69c5b420b7512e220fe3465e3c2ed5e12b.jpg b/data/2025/2504_05xxx/2504.05979/images/b09f487669528fc2963ebabd0310ed69c5b420b7512e220fe3465e3c2ed5e12b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8255d8efb78169e461ff256e8415ca06d1fa072b --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/b09f487669528fc2963ebabd0310ed69c5b420b7512e220fe3465e3c2ed5e12b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55660b2da45764e29b50ca855d53c769eb969485056ed03ed7f9c16fc36887d4 +size 8629 diff --git a/data/2025/2504_05xxx/2504.05979/images/b0d136fb714a9be563b2f3204d0e0bad1bfffa49452c0d77a5bdfb3a14e364d0.jpg b/data/2025/2504_05xxx/2504.05979/images/b0d136fb714a9be563b2f3204d0e0bad1bfffa49452c0d77a5bdfb3a14e364d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59a2fd89c7398f1c814fadf82700e312b45c3dbf --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b0d136fb714a9be563b2f3204d0e0bad1bfffa49452c0d77a5bdfb3a14e364d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c852427ebb770ae063c5ae06b033816670afafb408eae2ff1947d2b316f889d +size 5702 diff --git a/data/2025/2504_05xxx/2504.05979/images/b0d254da72b338ef668b468732303651a5ecbf992ea9575f30ca06355f2c2edc.jpg b/data/2025/2504_05xxx/2504.05979/images/b0d254da72b338ef668b468732303651a5ecbf992ea9575f30ca06355f2c2edc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be9b39322ce28051f7dc0a464fac11f84054edc0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b0d254da72b338ef668b468732303651a5ecbf992ea9575f30ca06355f2c2edc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6640af796ae145285196ebfef118e16b2fac987f717bd323db829e2559134900 +size 12459 diff --git a/data/2025/2504_05xxx/2504.05979/images/b13f052399947f2c4da3a2882a7878f31fc79514f3fe8a809951a32ca1140118.jpg b/data/2025/2504_05xxx/2504.05979/images/b13f052399947f2c4da3a2882a7878f31fc79514f3fe8a809951a32ca1140118.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f9a80d390af9d5c9e6139b3e385a3aac0e0f874 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b13f052399947f2c4da3a2882a7878f31fc79514f3fe8a809951a32ca1140118.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baf77ecef2493a91ecb794bba7d6d1d1439bea6776db50b7c83b9594b9b4b70e +size 3214 diff --git a/data/2025/2504_05xxx/2504.05979/images/b143d496a975b31c007201551ab2858b5f989539fd28932d11bf8433f8fa182f.jpg b/data/2025/2504_05xxx/2504.05979/images/b143d496a975b31c007201551ab2858b5f989539fd28932d11bf8433f8fa182f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d337295e6631a10586ff4ac9c72177eac456c0a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b143d496a975b31c007201551ab2858b5f989539fd28932d11bf8433f8fa182f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0648619daf8f813d84862c13e9bfed25e738e3520451607ff291c41c1c18873c +size 12519 diff --git a/data/2025/2504_05xxx/2504.05979/images/b15793d17ca7c80bf1b909d279a47bdf5b3a139165326ca3c1dffd011f7b0722.jpg b/data/2025/2504_05xxx/2504.05979/images/b15793d17ca7c80bf1b909d279a47bdf5b3a139165326ca3c1dffd011f7b0722.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c758298c49fd4339677252501a7e808d7b81859 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b15793d17ca7c80bf1b909d279a47bdf5b3a139165326ca3c1dffd011f7b0722.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:873969d020efbe5c9530403e268c65cc94ee6332380bf50769a1737e1d2b40ae +size 11588 diff --git a/data/2025/2504_05xxx/2504.05979/images/b169d0c531726750bbaeeb2fde03b21e893573395b4260fafdf640d946589a90.jpg 
b/data/2025/2504_05xxx/2504.05979/images/b169d0c531726750bbaeeb2fde03b21e893573395b4260fafdf640d946589a90.jpg new file mode 100644 index 0000000000000000000000000000000000000000..947aa93cf66ac67c5179e56f8e3d36d5364ad99a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b169d0c531726750bbaeeb2fde03b21e893573395b4260fafdf640d946589a90.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33dcb6d3fd1c811a322dd56ee63691d81d7d146b374311ad0585e05b4fbff347 +size 12916 diff --git a/data/2025/2504_05xxx/2504.05979/images/b1e74b98b6b45c314db43ad44efec736de0526ea53933c41d6f494dfc222ec5c.jpg b/data/2025/2504_05xxx/2504.05979/images/b1e74b98b6b45c314db43ad44efec736de0526ea53933c41d6f494dfc222ec5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5e0e8da73869013999b3358cc918ff7a4d32b30 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b1e74b98b6b45c314db43ad44efec736de0526ea53933c41d6f494dfc222ec5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:048d4fd57f1291e25339de71d9464e8a92a39539f80e1a0c85c18722d303a87b +size 10185 diff --git a/data/2025/2504_05xxx/2504.05979/images/b2861bea0423cee774150f0887bdb41c652b436f4eada06228dfb7f7f53078b3.jpg b/data/2025/2504_05xxx/2504.05979/images/b2861bea0423cee774150f0887bdb41c652b436f4eada06228dfb7f7f53078b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0858d90509b2fc6f602993155912e5c2dfe360fc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b2861bea0423cee774150f0887bdb41c652b436f4eada06228dfb7f7f53078b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c724181902afa3e226779241f8ffc1c659c0ae66b4efb2770788b7803320d6f6 +size 18382 diff --git a/data/2025/2504_05xxx/2504.05979/images/b2936841b119f7c4783c46da417c26650df828319071a28b934df3513e885121.jpg b/data/2025/2504_05xxx/2504.05979/images/b2936841b119f7c4783c46da417c26650df828319071a28b934df3513e885121.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4ebc5ddb5115c258c26ba31e6c5e4326f5d848e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b2936841b119f7c4783c46da417c26650df828319071a28b934df3513e885121.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ae6b2f6735eae593fa1143258070672090877243b73167683ffc9433ce86194 +size 8996 diff --git a/data/2025/2504_05xxx/2504.05979/images/b2bbda9be8e2840ce6f8b02ad35c98e1401b5728eb36e05879ea84bdd7c2ff9f.jpg b/data/2025/2504_05xxx/2504.05979/images/b2bbda9be8e2840ce6f8b02ad35c98e1401b5728eb36e05879ea84bdd7c2ff9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..689a0bd5cab09c8beda407d8250b99c182b25f49 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b2bbda9be8e2840ce6f8b02ad35c98e1401b5728eb36e05879ea84bdd7c2ff9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aeffc7c45585a73e424fa30dc926aa41d8a857bc786a5ce29b9a1fc62e398fc +size 13508 diff --git a/data/2025/2504_05xxx/2504.05979/images/b33cf7d522604acf3a5bd5c343b02f71b892c5585b03a7601c2a93da774f7f45.jpg b/data/2025/2504_05xxx/2504.05979/images/b33cf7d522604acf3a5bd5c343b02f71b892c5585b03a7601c2a93da774f7f45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3296fd550393742812efa449bae4a6c2639d8ad --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b33cf7d522604acf3a5bd5c343b02f71b892c5585b03a7601c2a93da774f7f45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:df590ee091997b338c1a605d1c6276bdb0a21ce850a2054d348c89402aeded94 +size 11644 diff --git a/data/2025/2504_05xxx/2504.05979/images/b33e1d9b8d191b6badbe6994b410d984583d93bb0eec0eb614f21170a2cdb7d5.jpg b/data/2025/2504_05xxx/2504.05979/images/b33e1d9b8d191b6badbe6994b410d984583d93bb0eec0eb614f21170a2cdb7d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb970ff9c94b8f940b36c766325a2be52921d9c6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b33e1d9b8d191b6badbe6994b410d984583d93bb0eec0eb614f21170a2cdb7d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63d0f8852fc91181d2b50ffb467570bcef395b3cf258ca89e3ae5571f8a49c19 +size 11345 diff --git a/data/2025/2504_05xxx/2504.05979/images/b359a631d7b7dee251d05aec14e46acaab4300aca8d16ae17240156e943e2d83.jpg b/data/2025/2504_05xxx/2504.05979/images/b359a631d7b7dee251d05aec14e46acaab4300aca8d16ae17240156e943e2d83.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a9662db6ddda49b67a5a76a2e50c118b153f4fb1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b359a631d7b7dee251d05aec14e46acaab4300aca8d16ae17240156e943e2d83.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b639ff91fa9e985dacc6cf4ec0c7f0738b4b1bc7d8d14807ac31b4664d20986 +size 9381 diff --git a/data/2025/2504_05xxx/2504.05979/images/b3d11b71a64fbc57f21f8ab46676a1e3eb0408ddfc762cacc7d3fc67d895124c.jpg b/data/2025/2504_05xxx/2504.05979/images/b3d11b71a64fbc57f21f8ab46676a1e3eb0408ddfc762cacc7d3fc67d895124c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8b87986cce6152e2e0ac6879b388b20cd51e92c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b3d11b71a64fbc57f21f8ab46676a1e3eb0408ddfc762cacc7d3fc67d895124c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c64565e6f4494e729bf2de419bc266e591f6149aa0eb31baced8d229bc7420f +size 16614 diff --git a/data/2025/2504_05xxx/2504.05979/images/b41cebcb69ab1488c833010b4ba4c0e40ecd6d2c22ed7583995ccecfc50154af.jpg b/data/2025/2504_05xxx/2504.05979/images/b41cebcb69ab1488c833010b4ba4c0e40ecd6d2c22ed7583995ccecfc50154af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a16b74c99b927ef3e912ad6acb0b3799d227f832 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b41cebcb69ab1488c833010b4ba4c0e40ecd6d2c22ed7583995ccecfc50154af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48968403e228e2c3bac97d831f4f56bfa2f36d139de3c5745f0d80db8913039a +size 18217 diff --git a/data/2025/2504_05xxx/2504.05979/images/b44926116e89a328041a0572f04bcac45c5be77916af431b03c4a0ab99dd761f.jpg b/data/2025/2504_05xxx/2504.05979/images/b44926116e89a328041a0572f04bcac45c5be77916af431b03c4a0ab99dd761f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0afda16997d60436c246f6b45c95c1bc2d6e9007 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/b44926116e89a328041a0572f04bcac45c5be77916af431b03c4a0ab99dd761f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39c798baef439739df247cb0a7bfc3654de2515c40c53de537c15240f823b862 +size 13965 diff --git a/data/2025/2504_05xxx/2504.05979/images/b44b50f9572bbd72865bce64600e8f10dc36affa85e764b23905c4db84176b3f.jpg b/data/2025/2504_05xxx/2504.05979/images/b44b50f9572bbd72865bce64600e8f10dc36affa85e764b23905c4db84176b3f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fb132983d692fa7e84736ecf27c1e46f4835e59 --- /dev/null +++ 
[Omitted: Git LFS pointer additions for binary images. This span of the diff adds several dozen new JPG files under data/2025/2504_05xxx/2504.05979/images/, each committed as a three-line LFS pointer file (a "version https://git-lfs.github.com/spec/v1" line, an "oid sha256:<hash>" line, and a "size <bytes>" line) with file mode 100644 and hunk header @@ -0,0 +1,3 @@.]
b/data/2025/2504_05xxx/2504.05979/images/d6c8b55a284c54268e71caa1c9af85186c073992a08e7df944a4827df416126b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d86274e1d37ab665991f5a3963c7da53bd3478c7d9b7b9c811c90ca2a941deb +size 15219 diff --git a/data/2025/2504_05xxx/2504.05979/images/d6cb4c9f05457cd35c0effca1f6abd864d75f43df91491467710d0bc3721a83c.jpg b/data/2025/2504_05xxx/2504.05979/images/d6cb4c9f05457cd35c0effca1f6abd864d75f43df91491467710d0bc3721a83c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d290cc24a6e530037c81930601427f8a2098f134 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d6cb4c9f05457cd35c0effca1f6abd864d75f43df91491467710d0bc3721a83c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:329ea36a4fb87c3251cc51f581f241281afc09146b6f98fc42c12130effcb189 +size 7579 diff --git a/data/2025/2504_05xxx/2504.05979/images/d7357528516c7245255631ca4698524569152aafd5239b1a9cd9dfa6ee24c212.jpg b/data/2025/2504_05xxx/2504.05979/images/d7357528516c7245255631ca4698524569152aafd5239b1a9cd9dfa6ee24c212.jpg new file mode 100644 index 0000000000000000000000000000000000000000..02b3147158f75db2cd0a39de2af3199b43a7d7ac --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d7357528516c7245255631ca4698524569152aafd5239b1a9cd9dfa6ee24c212.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d3794950c8ea8fb83670464d4cf412efed6b0450cff5a5631291f99491002af +size 10838 diff --git a/data/2025/2504_05xxx/2504.05979/images/d780d806748e7a2e2d245f4bb75612e3a332fab55a1cc6b0fa0ad203f17dd8b5.jpg b/data/2025/2504_05xxx/2504.05979/images/d780d806748e7a2e2d245f4bb75612e3a332fab55a1cc6b0fa0ad203f17dd8b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cd56b68e9a4012541027704977e5ebe4872f9f5b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d780d806748e7a2e2d245f4bb75612e3a332fab55a1cc6b0fa0ad203f17dd8b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:618eaebf628baf38a416f4b4cb77178143c3ef2f3db9f4e9d5a2007fb30cbf83 +size 8275 diff --git a/data/2025/2504_05xxx/2504.05979/images/d79345dd614218dc4a3fbadc34f3efbc6475c52df944a483358394a2ae83a4e5.jpg b/data/2025/2504_05xxx/2504.05979/images/d79345dd614218dc4a3fbadc34f3efbc6475c52df944a483358394a2ae83a4e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..95d8cdc84583a1ee82ef76427372a33004b8cbf6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d79345dd614218dc4a3fbadc34f3efbc6475c52df944a483358394a2ae83a4e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72d1808a6db88b1e694cb2ea17503eebe05f188cd11bbf7333953050c3434f0b +size 15372 diff --git a/data/2025/2504_05xxx/2504.05979/images/d7a7f1fc31d3a9490433f88485c2a950c61d2d98019f2ef1f9666ccb7987ad48.jpg b/data/2025/2504_05xxx/2504.05979/images/d7a7f1fc31d3a9490433f88485c2a950c61d2d98019f2ef1f9666ccb7987ad48.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f395f8029dca91becca8041dab22e0c35742be5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d7a7f1fc31d3a9490433f88485c2a950c61d2d98019f2ef1f9666ccb7987ad48.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19ee8c9ec767272b2cafc0eb17305b50667f8ccbd684afb88d0ed07982be5e4f +size 10324 diff --git a/data/2025/2504_05xxx/2504.05979/images/d87f5d38548a498be4acc2e6175a859f019a900cdcbd8394bd2700b4f145ed8f.jpg 
b/data/2025/2504_05xxx/2504.05979/images/d87f5d38548a498be4acc2e6175a859f019a900cdcbd8394bd2700b4f145ed8f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bf9b1d0a28cf55a64e7a01a91431d45279f8702 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d87f5d38548a498be4acc2e6175a859f019a900cdcbd8394bd2700b4f145ed8f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47baea6d50a2307a3587aa0e5f88d1e67e1532921e20013583b8197dc2e30617 +size 18814 diff --git a/data/2025/2504_05xxx/2504.05979/images/d895489817a6f21c887841484b153778383ad7ed2f565fe9eb3bf5a334aa848c.jpg b/data/2025/2504_05xxx/2504.05979/images/d895489817a6f21c887841484b153778383ad7ed2f565fe9eb3bf5a334aa848c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69ca5e7c8fff6a40d3661b4c797bea02e339d822 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d895489817a6f21c887841484b153778383ad7ed2f565fe9eb3bf5a334aa848c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa3d0e98675aeaa2a8020bb2dae92974bc9d4afd2329d86939c5334647057275 +size 11384 diff --git a/data/2025/2504_05xxx/2504.05979/images/d89d446208b65160e03ce4ced290520055aa6e2461f4822a704190db34fdc06c.jpg b/data/2025/2504_05xxx/2504.05979/images/d89d446208b65160e03ce4ced290520055aa6e2461f4822a704190db34fdc06c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cc2da295350f59cd1ca2ac5aa59cedaa0d83cd7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d89d446208b65160e03ce4ced290520055aa6e2461f4822a704190db34fdc06c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94a630803dfa45a65d9acd78ad94522d08cfb322e271bdab48ea51c2e88a3ad4 +size 12894 diff --git a/data/2025/2504_05xxx/2504.05979/images/d8de1fb96772ae2a9a2c7fa60dfee2b9c9838cdd7633d066debcab1d110abac9.jpg b/data/2025/2504_05xxx/2504.05979/images/d8de1fb96772ae2a9a2c7fa60dfee2b9c9838cdd7633d066debcab1d110abac9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53b707a66a250423bdcd711e42465ac5bf4c8222 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d8de1fb96772ae2a9a2c7fa60dfee2b9c9838cdd7633d066debcab1d110abac9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27446a32798fdcf109beea2873172cf932ffb47068bc8845cc92165c5fccc968 +size 10714 diff --git a/data/2025/2504_05xxx/2504.05979/images/d900d2a2f4a7e03bc322a876b3c802692357c1fec98fb4a0bdfe6de558f8f44a.jpg b/data/2025/2504_05xxx/2504.05979/images/d900d2a2f4a7e03bc322a876b3c802692357c1fec98fb4a0bdfe6de558f8f44a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8652da6e3d8ee176f2fb501508359c4606e3cef9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d900d2a2f4a7e03bc322a876b3c802692357c1fec98fb4a0bdfe6de558f8f44a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7a3c50cd66b17346a96bc7787125bd118af01bf50ea2ae431f3988389efbcd2 +size 6502 diff --git a/data/2025/2504_05xxx/2504.05979/images/d96c269179392a477b20cac488bf504f948497d76d9712d151212c0dc9a0c84f.jpg b/data/2025/2504_05xxx/2504.05979/images/d96c269179392a477b20cac488bf504f948497d76d9712d151212c0dc9a0c84f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1cf2758c73fe11068081a2321ecafbd7c40b6902 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d96c269179392a477b20cac488bf504f948497d76d9712d151212c0dc9a0c84f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:41c9bb1c0882b4d0ffce905bb0982fb06b48f46f28dd7c5ca34ccd5771ee4384 +size 13771 diff --git a/data/2025/2504_05xxx/2504.05979/images/d9be1ffaecc96d29076914277e7c30d3db8c4735a052211f5df25b329509f3bf.jpg b/data/2025/2504_05xxx/2504.05979/images/d9be1ffaecc96d29076914277e7c30d3db8c4735a052211f5df25b329509f3bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2703870064e4f71cd2ac323a3bc4f64000336c9e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d9be1ffaecc96d29076914277e7c30d3db8c4735a052211f5df25b329509f3bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ddb68957635cbab91bca93740be71d03ba8f99054c6c5e129d73a0d807b4d43 +size 5064 diff --git a/data/2025/2504_05xxx/2504.05979/images/d9fef0de375f6e8e87b378aa29d3ee9d9b91df21b84ed0eb8eaf780d387eb2f7.jpg b/data/2025/2504_05xxx/2504.05979/images/d9fef0de375f6e8e87b378aa29d3ee9d9b91df21b84ed0eb8eaf780d387eb2f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e863b50c8e96402e4c9a4b02114b48138a1bdb08 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/d9fef0de375f6e8e87b378aa29d3ee9d9b91df21b84ed0eb8eaf780d387eb2f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce4f15ae5d703d39d89a500c01dd360ea3305e8223e4b515a106564d7341c7d5 +size 10817 diff --git a/data/2025/2504_05xxx/2504.05979/images/daa8c1bb0acfd6e425d089f419400308811f1212d5b44f851d12ef8b15bbe500.jpg b/data/2025/2504_05xxx/2504.05979/images/daa8c1bb0acfd6e425d089f419400308811f1212d5b44f851d12ef8b15bbe500.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acb6b79b8156c27ac44572eddb3566c015a5d3aa --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/daa8c1bb0acfd6e425d089f419400308811f1212d5b44f851d12ef8b15bbe500.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:675fa390ab254167e98c34876c941fb80fd0ba5e9fc94e7dde6a1ec2265ade2a +size 16220 diff --git a/data/2025/2504_05xxx/2504.05979/images/db93f8cdc75167415d8ec0a2d7ba0ce8bac4ffe433a2ff11a79751a9deb5cc51.jpg b/data/2025/2504_05xxx/2504.05979/images/db93f8cdc75167415d8ec0a2d7ba0ce8bac4ffe433a2ff11a79751a9deb5cc51.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70545967f18e0597d5a3ab7ad09c3347229baf68 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/db93f8cdc75167415d8ec0a2d7ba0ce8bac4ffe433a2ff11a79751a9deb5cc51.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cbacfecb7a4bb2c88d81fa14d9171994f9fc4687e2db2d917b7c5f081254728 +size 10544 diff --git a/data/2025/2504_05xxx/2504.05979/images/dbd74c382963ca8096f3df2cbf8c15b1dc9bb0dcf7b14f2691ca5f3969de4ae6.jpg b/data/2025/2504_05xxx/2504.05979/images/dbd74c382963ca8096f3df2cbf8c15b1dc9bb0dcf7b14f2691ca5f3969de4ae6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..688c75d0954281db9e6dce94257c141cc647f338 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/dbd74c382963ca8096f3df2cbf8c15b1dc9bb0dcf7b14f2691ca5f3969de4ae6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3086d24fb6645bc043e1ca0e8af66b987f7074b1ed7d411c8f99a15c697583e +size 6869 diff --git a/data/2025/2504_05xxx/2504.05979/images/dccfcb457f6ecc58a1354c97e2786be9af3ddba306e9c14119bc34b83281433c.jpg b/data/2025/2504_05xxx/2504.05979/images/dccfcb457f6ecc58a1354c97e2786be9af3ddba306e9c14119bc34b83281433c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65a12556b4e3446204a3a81ee8606189e6332ce5 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/dccfcb457f6ecc58a1354c97e2786be9af3ddba306e9c14119bc34b83281433c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:095d88f3376bf1ca56e9f52884c7b10441bbb78024faff83df45e59686cff590 +size 8738 diff --git a/data/2025/2504_05xxx/2504.05979/images/dcde7c31e72b7a5f000f1f4ce3071419d9f67361a8d18ec4966c3bb5f4513ab0.jpg b/data/2025/2504_05xxx/2504.05979/images/dcde7c31e72b7a5f000f1f4ce3071419d9f67361a8d18ec4966c3bb5f4513ab0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb4932e471fdf9ff895424ae1c47d692406d0e53 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/dcde7c31e72b7a5f000f1f4ce3071419d9f67361a8d18ec4966c3bb5f4513ab0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ced215af881f388de3ec47411d261d9d9ee4e7bfa37028ffeefa7e677350d1d8 +size 11954 diff --git a/data/2025/2504_05xxx/2504.05979/images/dd17353d44f66bbb4b35301466dd1088e56a6dbb759f8d6e1a798e291070cdb4.jpg b/data/2025/2504_05xxx/2504.05979/images/dd17353d44f66bbb4b35301466dd1088e56a6dbb759f8d6e1a798e291070cdb4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71ec2d471220d57e4fa2c41e1172db61c470f126 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/dd17353d44f66bbb4b35301466dd1088e56a6dbb759f8d6e1a798e291070cdb4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b5a47fe93d9ae05587618ca5e9fb67de09d6be96945fc5288c8c0e4439cbc87 +size 7627 diff --git a/data/2025/2504_05xxx/2504.05979/images/dd2dae5038367f85e716992705228d85cfed4fff5609bcaadfd9baa3415e36b6.jpg b/data/2025/2504_05xxx/2504.05979/images/dd2dae5038367f85e716992705228d85cfed4fff5609bcaadfd9baa3415e36b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a69b875321ca40f58669bfa8ecbb03236f86d40d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/dd2dae5038367f85e716992705228d85cfed4fff5609bcaadfd9baa3415e36b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c73b35bc8e7a4512ff9c5dc13dcd5c7887e19d99e5c3952b1e99c663c73828b +size 9521 diff --git a/data/2025/2504_05xxx/2504.05979/images/dd4f8ceb102f253e19dc4e99a539f2e28701caecb904ac3596b116d8f9213bed.jpg b/data/2025/2504_05xxx/2504.05979/images/dd4f8ceb102f253e19dc4e99a539f2e28701caecb904ac3596b116d8f9213bed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e5c0d667fcae9c8891ff87f532e670b0615b634 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/dd4f8ceb102f253e19dc4e99a539f2e28701caecb904ac3596b116d8f9213bed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e7ee4b59a2d0cd2fc8f161ce48f762da76da68ed8cc3255a3b4f111ba92c9db +size 7972 diff --git a/data/2025/2504_05xxx/2504.05979/images/dd6454a272ee52c994805637b87e75f5966a46cdbd76435fa0cf210c205b0168.jpg b/data/2025/2504_05xxx/2504.05979/images/dd6454a272ee52c994805637b87e75f5966a46cdbd76435fa0cf210c205b0168.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71ec9556e6463d757e57fbd37b7b22c0c868d550 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/dd6454a272ee52c994805637b87e75f5966a46cdbd76435fa0cf210c205b0168.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8e17254bc565ab0fcbf7ea3dbb46e3868bccbd7f63c73466cc8b60fe838dadc +size 10093 diff --git a/data/2025/2504_05xxx/2504.05979/images/ddca6e48a5804db47d5999c3e273c4763a7ecac6ea67d75a092e6a71ab128b72.jpg 
b/data/2025/2504_05xxx/2504.05979/images/ddca6e48a5804db47d5999c3e273c4763a7ecac6ea67d75a092e6a71ab128b72.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06471fceb01e69fd1abd98e8d311a7c51a5e710f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ddca6e48a5804db47d5999c3e273c4763a7ecac6ea67d75a092e6a71ab128b72.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88e42a41bff7a73c4b6665a88ee29967912b9257415411c7600ca8a0d3dfc548 +size 5732 diff --git a/data/2025/2504_05xxx/2504.05979/images/ddccd12b79d35e83a67c0ec67ebca9d6ffdf81f90dbdc128897666c54e5ca249.jpg b/data/2025/2504_05xxx/2504.05979/images/ddccd12b79d35e83a67c0ec67ebca9d6ffdf81f90dbdc128897666c54e5ca249.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93082bd474c328d630246008067d2165c79586c0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ddccd12b79d35e83a67c0ec67ebca9d6ffdf81f90dbdc128897666c54e5ca249.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dbbfe6c321389811326dbeead864ea84fa88d9064f1865e6c0064c2c4683a52 +size 12189 diff --git a/data/2025/2504_05xxx/2504.05979/images/de07423ffdd0dc744cf6f4c7b38144d08ecc3112098ef6d6efbfb06253fb9def.jpg b/data/2025/2504_05xxx/2504.05979/images/de07423ffdd0dc744cf6f4c7b38144d08ecc3112098ef6d6efbfb06253fb9def.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c91f876871f7859cc19d5a3a0595a43791a2e4d3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/de07423ffdd0dc744cf6f4c7b38144d08ecc3112098ef6d6efbfb06253fb9def.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:063d04592eaebaf6ed05a40f41099b039b1ee8bf35af49868434bdc457daba7a +size 5230 diff --git a/data/2025/2504_05xxx/2504.05979/images/de15cb94a339b40e3ca5dcb35e27521f86d056376dba353e39d9dfc849b5350a.jpg b/data/2025/2504_05xxx/2504.05979/images/de15cb94a339b40e3ca5dcb35e27521f86d056376dba353e39d9dfc849b5350a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6632c8d2f4f82236fcbc53966e15cd8e437fdff1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/de15cb94a339b40e3ca5dcb35e27521f86d056376dba353e39d9dfc849b5350a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5b196689be86461780a4fc5cd63c6220c55c6d5f54c98aa6c0eb6f1ce118c6c +size 15272 diff --git a/data/2025/2504_05xxx/2504.05979/images/de2131f5c3c16b1cda2aa0dec3ae6fe0689fffc4267105db8583bef2e64b0cad.jpg b/data/2025/2504_05xxx/2504.05979/images/de2131f5c3c16b1cda2aa0dec3ae6fe0689fffc4267105db8583bef2e64b0cad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28c7b61f38081ac5733005f5d8945b2256e5ea9e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/de2131f5c3c16b1cda2aa0dec3ae6fe0689fffc4267105db8583bef2e64b0cad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e05680b6af1303badf2e7fd8cd8d2e434bdb81d7047931928f1eaa206e088c9 +size 10945 diff --git a/data/2025/2504_05xxx/2504.05979/images/de9e123e1e1147256977150c0054e8e9a10a47b47074aa699979adc065b6358c.jpg b/data/2025/2504_05xxx/2504.05979/images/de9e123e1e1147256977150c0054e8e9a10a47b47074aa699979adc065b6358c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acec9bcb26491695928ea13de3292ef090c2f2c1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/de9e123e1e1147256977150c0054e8e9a10a47b47074aa699979adc065b6358c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:5e9d33720eed60c770dc568022e93270cee37bcc2bf1ca297d4f7eea184bf0e7 +size 12741 diff --git a/data/2025/2504_05xxx/2504.05979/images/decdf079eb5a7ceb8546733501c7687db436872547911348307fba42a262e86d.jpg b/data/2025/2504_05xxx/2504.05979/images/decdf079eb5a7ceb8546733501c7687db436872547911348307fba42a262e86d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c091eb79f67124bb52bff8bbc29cf0e53a0d9a0d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/decdf079eb5a7ceb8546733501c7687db436872547911348307fba42a262e86d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c2ebae768e50a4486cbe3f546f3ad91b74cc78f03b759f535d2f449510ec1d9 +size 16689 diff --git a/data/2025/2504_05xxx/2504.05979/images/deeb960aa331c541bdfd82bbc98e2d7b1232c00088999d254c3e72f9a70ad184.jpg b/data/2025/2504_05xxx/2504.05979/images/deeb960aa331c541bdfd82bbc98e2d7b1232c00088999d254c3e72f9a70ad184.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd11ecbfb02f4d7d03fa0949603cb971edce84f0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/deeb960aa331c541bdfd82bbc98e2d7b1232c00088999d254c3e72f9a70ad184.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2fc8ef6a887dfa08c23f815f6e074d647ca386b5840c674518d56b541101e6a +size 13162 diff --git a/data/2025/2504_05xxx/2504.05979/images/df7e8259254f0cce7de2b44908edba93ace97aedc79647c2a02a490d81d15f6d.jpg b/data/2025/2504_05xxx/2504.05979/images/df7e8259254f0cce7de2b44908edba93ace97aedc79647c2a02a490d81d15f6d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52f9ddf4f5610db5bf806501e9e02d82ed8c70bb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/df7e8259254f0cce7de2b44908edba93ace97aedc79647c2a02a490d81d15f6d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1fbca078de230f85bb90a0efb5196ca960185c418b77fcaefa2823e944fb636 +size 6991 diff --git a/data/2025/2504_05xxx/2504.05979/images/dfa857e58ea3978f96a70b80835f05da2c3c164ea663a535310bee9d91124824.jpg b/data/2025/2504_05xxx/2504.05979/images/dfa857e58ea3978f96a70b80835f05da2c3c164ea663a535310bee9d91124824.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf2b444b146cefdb54d8867fdbf4aac524ec7d3f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/dfa857e58ea3978f96a70b80835f05da2c3c164ea663a535310bee9d91124824.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9c4b7b2946a4ab2047f0770a1c62873a7833de0201998787db9293311555dab +size 12385 diff --git a/data/2025/2504_05xxx/2504.05979/images/dfdea49947535ad053094764e4f1c4f57b87c66ef48041b87f0c1403e8ebfb06.jpg b/data/2025/2504_05xxx/2504.05979/images/dfdea49947535ad053094764e4f1c4f57b87c66ef48041b87f0c1403e8ebfb06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c651607df4697e081ed8830577a1d3c898b8ec8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/dfdea49947535ad053094764e4f1c4f57b87c66ef48041b87f0c1403e8ebfb06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:587a5d365dc0e86cd4e415926ff2b52c7d3950f724963ed4eb842dfb6cc301a2 +size 14652 diff --git a/data/2025/2504_05xxx/2504.05979/images/dfe5bd0363834d7a517a03c99514ab82f92032ef90b648cf55ef38ce418a6054.jpg b/data/2025/2504_05xxx/2504.05979/images/dfe5bd0363834d7a517a03c99514ab82f92032ef90b648cf55ef38ce418a6054.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3926540140dac575789fa36602f91a4a1a8551f2 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/dfe5bd0363834d7a517a03c99514ab82f92032ef90b648cf55ef38ce418a6054.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cba4a46903fe7a2fc8aa9eaeaab68473452c7637d81dd54d2170539b7dbdbbc +size 5830 diff --git a/data/2025/2504_05xxx/2504.05979/images/e06a1d22dd6093751188bb61a4da58c22606454753b4f5ef255503039c23b4d9.jpg b/data/2025/2504_05xxx/2504.05979/images/e06a1d22dd6093751188bb61a4da58c22606454753b4f5ef255503039c23b4d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a66e942f1834054201a65bdfbf5e38170b1cd7bc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e06a1d22dd6093751188bb61a4da58c22606454753b4f5ef255503039c23b4d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f95209c9e41e6320c7d0aa653c92fdb501b906425172965d086c01a93016bf8d +size 6768 diff --git a/data/2025/2504_05xxx/2504.05979/images/e099a5ea018e11845a670b91da9b9e084bcfb35931d3f4ead8ac754370ad78b3.jpg b/data/2025/2504_05xxx/2504.05979/images/e099a5ea018e11845a670b91da9b9e084bcfb35931d3f4ead8ac754370ad78b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6e7844a9595293295bbe0de4e7e31b6ef6258c3c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e099a5ea018e11845a670b91da9b9e084bcfb35931d3f4ead8ac754370ad78b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2627ebfba29adf2805f5fe5dce6420c64006cfa7eab803c7171957aca7ebf8d2 +size 13328 diff --git a/data/2025/2504_05xxx/2504.05979/images/e0ea300e9865c0a0b5c3f4003da7770db1b8cd75b5f648d2fb6667866807f1bb.jpg b/data/2025/2504_05xxx/2504.05979/images/e0ea300e9865c0a0b5c3f4003da7770db1b8cd75b5f648d2fb6667866807f1bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a37e4edc636d49f6bb7fa611cc0dbf1e3a0361a8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e0ea300e9865c0a0b5c3f4003da7770db1b8cd75b5f648d2fb6667866807f1bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99834e3a18abf0f276dbd55472d67819cc5a1d72dde7f4d8dab097014253d949 +size 11820 diff --git a/data/2025/2504_05xxx/2504.05979/images/e138171044c5ed197f98e471ee68b0a7988c369038ddde3296f693594e3ed2c6.jpg b/data/2025/2504_05xxx/2504.05979/images/e138171044c5ed197f98e471ee68b0a7988c369038ddde3296f693594e3ed2c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37c69e70a01910e8cf334060f460fbb94868ee47 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e138171044c5ed197f98e471ee68b0a7988c369038ddde3296f693594e3ed2c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbffea94b024583423ad0f6761737cb37c43c562ba06ed67aa997ccaab061d0d +size 6372 diff --git a/data/2025/2504_05xxx/2504.05979/images/e15b31d827df49078bd8ff4b5a3c0b7e9766b7e0ce33322d4b3e8fa24173ca40.jpg b/data/2025/2504_05xxx/2504.05979/images/e15b31d827df49078bd8ff4b5a3c0b7e9766b7e0ce33322d4b3e8fa24173ca40.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db8d28ac8e3525a2d54cac10081e7c67d595193b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e15b31d827df49078bd8ff4b5a3c0b7e9766b7e0ce33322d4b3e8fa24173ca40.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:482fac1da50cc85c595ed33ee8e8cfbd377549462d8cb6ed5ed73e4124ec678c +size 6141 diff --git a/data/2025/2504_05xxx/2504.05979/images/e1866d261c2cafcac4f4dbe2ed648662bbfe95d442c888ae66ee515ecf40b804.jpg 
b/data/2025/2504_05xxx/2504.05979/images/e1866d261c2cafcac4f4dbe2ed648662bbfe95d442c888ae66ee515ecf40b804.jpg new file mode 100644 index 0000000000000000000000000000000000000000..44dd2c782938eba2963558159e68ed5cabadf28c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e1866d261c2cafcac4f4dbe2ed648662bbfe95d442c888ae66ee515ecf40b804.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e27429f97d85e7adab1139e22bef9c09ec53105c68d98331ae51bd788bd48e9 +size 14048 diff --git a/data/2025/2504_05xxx/2504.05979/images/e19a526ae728ef205a6d09c27b250ea5bcb7433bc30ff0fe90075b9fa80b7e50.jpg b/data/2025/2504_05xxx/2504.05979/images/e19a526ae728ef205a6d09c27b250ea5bcb7433bc30ff0fe90075b9fa80b7e50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b8492898e0c67ce280260bfb1bd329ec32ff1201 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e19a526ae728ef205a6d09c27b250ea5bcb7433bc30ff0fe90075b9fa80b7e50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36a616a64a4634bf8b0024fe9906cb870f64c75ba13fa1c36c8f07120c96eba6 +size 7987 diff --git a/data/2025/2504_05xxx/2504.05979/images/e1dc82e6233835aef84e7c0d62fd27535b3b4aee7f6bc4beab765dacdbbdc4c1.jpg b/data/2025/2504_05xxx/2504.05979/images/e1dc82e6233835aef84e7c0d62fd27535b3b4aee7f6bc4beab765dacdbbdc4c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b114f38c2603733c4e9b36f704c310d79d1ab54 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e1dc82e6233835aef84e7c0d62fd27535b3b4aee7f6bc4beab765dacdbbdc4c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33675e422fef9d3a35a16bdb83a732bf84b33eb7d4d33fc53b11a519d1ddd698 +size 16068 diff --git a/data/2025/2504_05xxx/2504.05979/images/e21293e1dde8f2f8a8736c0a4d673afa08874092081ea4b1ec07788bbe177b05.jpg b/data/2025/2504_05xxx/2504.05979/images/e21293e1dde8f2f8a8736c0a4d673afa08874092081ea4b1ec07788bbe177b05.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4fadee84d46eaa014e4d19334ebddb652b902dc5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e21293e1dde8f2f8a8736c0a4d673afa08874092081ea4b1ec07788bbe177b05.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b29517e34e70e22e6af5fe2bfd5f604351396cb02a05f6d266169f4cb36b89e8 +size 16603 diff --git a/data/2025/2504_05xxx/2504.05979/images/e2bafc2e24743ad788bd64f544fbd0056daacff8e5ccf84bf8a7e7d5bd57b702.jpg b/data/2025/2504_05xxx/2504.05979/images/e2bafc2e24743ad788bd64f544fbd0056daacff8e5ccf84bf8a7e7d5bd57b702.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d80f7dbeb4b1e902371dc19b50691c0fe0a06349 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e2bafc2e24743ad788bd64f544fbd0056daacff8e5ccf84bf8a7e7d5bd57b702.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3d2ed742cc68620effed26775522984d9581267ba68e87fd3366e899fc32551 +size 4745 diff --git a/data/2025/2504_05xxx/2504.05979/images/e2e8d2f4bc567a25d1d7f56d98489dc9897b06c00ba90d8a58c28ba70a1e2151.jpg b/data/2025/2504_05xxx/2504.05979/images/e2e8d2f4bc567a25d1d7f56d98489dc9897b06c00ba90d8a58c28ba70a1e2151.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fa282b862a7041955d4328c8acd63818eec55f7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e2e8d2f4bc567a25d1d7f56d98489dc9897b06c00ba90d8a58c28ba70a1e2151.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:175207da73ce1ac5b6827a114e8e2c4ac4757b0a9dc5fbbc334848cf9dd4290e +size 20450 diff --git a/data/2025/2504_05xxx/2504.05979/images/e324eb93d8c4dfceef9d783883e1f613acfba48879633c0d04fd8039c2f4ba20.jpg b/data/2025/2504_05xxx/2504.05979/images/e324eb93d8c4dfceef9d783883e1f613acfba48879633c0d04fd8039c2f4ba20.jpg new file mode 100644 index 0000000000000000000000000000000000000000..595cf8175bc6650844a4ac272e8943cad1c8f159 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e324eb93d8c4dfceef9d783883e1f613acfba48879633c0d04fd8039c2f4ba20.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:553e9fdf55104e3682cd6ee2d3c98b853ede3c8a513a4f3cad1184739632341b +size 7919 diff --git a/data/2025/2504_05xxx/2504.05979/images/e331257e3a002ea35aae9b3560e329ae51aea6ec05a4a8f8fc4c533cfe981c94.jpg b/data/2025/2504_05xxx/2504.05979/images/e331257e3a002ea35aae9b3560e329ae51aea6ec05a4a8f8fc4c533cfe981c94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7aebaa49523b68a0ac1cdaeb1597289b05aa0f10 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e331257e3a002ea35aae9b3560e329ae51aea6ec05a4a8f8fc4c533cfe981c94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:618fca95c8bc0a8bd2b7ccdc16043f72a4aa2737d348755a722d5476d0757805 +size 7784 diff --git a/data/2025/2504_05xxx/2504.05979/images/e37560da746ee64c9f4dbda99f18792081354ee1290d5fb107e6b17833cab89b.jpg b/data/2025/2504_05xxx/2504.05979/images/e37560da746ee64c9f4dbda99f18792081354ee1290d5fb107e6b17833cab89b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0fcdef39a3120b650ee3d51076d05071ff6cd4fb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e37560da746ee64c9f4dbda99f18792081354ee1290d5fb107e6b17833cab89b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f903c7f7265bab4b40083f990367e531d41e5d56b2832b51a691c88732a4e24d +size 17902 diff --git a/data/2025/2504_05xxx/2504.05979/images/e391d4c6d98af6fcc61c7f1d8e60d08040956f96e2c5d61ef9f3aac4d730f7a0.jpg b/data/2025/2504_05xxx/2504.05979/images/e391d4c6d98af6fcc61c7f1d8e60d08040956f96e2c5d61ef9f3aac4d730f7a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31d3061c9d2a6146e2482f21a41df5cf6b90439a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e391d4c6d98af6fcc61c7f1d8e60d08040956f96e2c5d61ef9f3aac4d730f7a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6ae41b28a698eceb3e559ce4402dff6c0bd70cfc526fe092b83851177f473eb +size 15324 diff --git a/data/2025/2504_05xxx/2504.05979/images/e3951f420a6bebec8d982ae86c6d63b89a8e9d80db44d4a5af3d22ff2a0dabf6.jpg b/data/2025/2504_05xxx/2504.05979/images/e3951f420a6bebec8d982ae86c6d63b89a8e9d80db44d4a5af3d22ff2a0dabf6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..426d8dcc33f9acba496591124a9d6dd836bde377 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e3951f420a6bebec8d982ae86c6d63b89a8e9d80db44d4a5af3d22ff2a0dabf6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1afb4e754e6070cb79374a20b43e439ce09795ceeecad77857046de9db5d5ceb +size 8050 diff --git a/data/2025/2504_05xxx/2504.05979/images/e3c6465c6ac99b5cac8b7b87979c048e1ec6ea06f3001201cdafef489c4470e1.jpg b/data/2025/2504_05xxx/2504.05979/images/e3c6465c6ac99b5cac8b7b87979c048e1ec6ea06f3001201cdafef489c4470e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7bfd91a94596f12e0b9cdae0f4257b340bce9958 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/e3c6465c6ac99b5cac8b7b87979c048e1ec6ea06f3001201cdafef489c4470e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a6133bec11963aa0ecd832e3f25ac739c16d4be2f5b26f634c5047c0eeacf61 +size 9215 diff --git a/data/2025/2504_05xxx/2504.05979/images/e4187d7ed6a1b514ec5676db3d2003f937bfa937564597d410716eae507bb1ca.jpg b/data/2025/2504_05xxx/2504.05979/images/e4187d7ed6a1b514ec5676db3d2003f937bfa937564597d410716eae507bb1ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ef83e0a5beca07f3a58359c3fe880ad3ea72ee9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e4187d7ed6a1b514ec5676db3d2003f937bfa937564597d410716eae507bb1ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ebc466d077657f4edc58f0b6739ecbb17d8af2c1ca5b2c960812b0fa87ed46f +size 12509 diff --git a/data/2025/2504_05xxx/2504.05979/images/e4a1fb76e31c6e6095d35539c83419338ff04dfd9cc1cb7d1e9c18ca4ecabfab.jpg b/data/2025/2504_05xxx/2504.05979/images/e4a1fb76e31c6e6095d35539c83419338ff04dfd9cc1cb7d1e9c18ca4ecabfab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ded9a986e689e6bb7876094a0b633bffd0da501d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e4a1fb76e31c6e6095d35539c83419338ff04dfd9cc1cb7d1e9c18ca4ecabfab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d19a60a6c3fefe848a31f335b0dde9ebb78273ea412e339c110dfca55d15a5f +size 13379 diff --git a/data/2025/2504_05xxx/2504.05979/images/e4c0179dfb831279ce07771cfe8cbac48857d6e750c966acbf75a9fd4eaf0a65.jpg b/data/2025/2504_05xxx/2504.05979/images/e4c0179dfb831279ce07771cfe8cbac48857d6e750c966acbf75a9fd4eaf0a65.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a963f991db0efdb1c5249b67064a2cfe86db7e36 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e4c0179dfb831279ce07771cfe8cbac48857d6e750c966acbf75a9fd4eaf0a65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c936d44a23fdbd4755f629a9d5c4c3a9fc6782f2edf874bf77cb128f14ad498 +size 9259 diff --git a/data/2025/2504_05xxx/2504.05979/images/e502bbf30709fbc0fd7fe24f3e4886279e719fcc412c016e104a0028ed894e18.jpg b/data/2025/2504_05xxx/2504.05979/images/e502bbf30709fbc0fd7fe24f3e4886279e719fcc412c016e104a0028ed894e18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2901d492a1d9c2f4fbcf83fb8034a4c9b093947 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e502bbf30709fbc0fd7fe24f3e4886279e719fcc412c016e104a0028ed894e18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce3f7e5298ce5664e94145a27552752208118262d0c2dd349f30ebce5afed23e +size 13723 diff --git a/data/2025/2504_05xxx/2504.05979/images/e53143ad29978bfb50a47d639d116221922ece9fa13749ad760e4f2ca7cbecbf.jpg b/data/2025/2504_05xxx/2504.05979/images/e53143ad29978bfb50a47d639d116221922ece9fa13749ad760e4f2ca7cbecbf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82671a6213ffa728bc5a3aaee3d115acd25c5b12 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e53143ad29978bfb50a47d639d116221922ece9fa13749ad760e4f2ca7cbecbf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5074ae5d912b68585d4669ad7788c214e1075e833cd07cdd52f33d1d07e39840 +size 12463 diff --git a/data/2025/2504_05xxx/2504.05979/images/e533493affd9962f40a067f07476309f50b38f805e9b7e4cb111bd1d65a85975.jpg 
b/data/2025/2504_05xxx/2504.05979/images/e533493affd9962f40a067f07476309f50b38f805e9b7e4cb111bd1d65a85975.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cae9389dddab0e31ca9653302d0308bae57e7fe --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e533493affd9962f40a067f07476309f50b38f805e9b7e4cb111bd1d65a85975.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ac64065ea12c901f6e7734ed5d30f69a6dc280ec68dc4aa0a59d7a97e5c8379 +size 13858 diff --git a/data/2025/2504_05xxx/2504.05979/images/e58eec2c9254e2da8414f358c4e5148a84d45f6a54957dab8cb6f162f5233635.jpg b/data/2025/2504_05xxx/2504.05979/images/e58eec2c9254e2da8414f358c4e5148a84d45f6a54957dab8cb6f162f5233635.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7958e219b4d03010687fc82edc4368cc73100031 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e58eec2c9254e2da8414f358c4e5148a84d45f6a54957dab8cb6f162f5233635.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14f473e2e6eb027d8276c71d72112cc786de1b0c99722ef845700f731a8a3d77 +size 12788 diff --git a/data/2025/2504_05xxx/2504.05979/images/e5aa20f742421c44599c060ff033d5084778874fc965254ee47e49189abed76d.jpg b/data/2025/2504_05xxx/2504.05979/images/e5aa20f742421c44599c060ff033d5084778874fc965254ee47e49189abed76d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d631f79f73546dedd0e8afa9a98d0a1879cd413 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e5aa20f742421c44599c060ff033d5084778874fc965254ee47e49189abed76d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae25c1afc48ae194f223a5fedaaf37c6854188d702dee0d9466b2819b76f87fe +size 10587 diff --git a/data/2025/2504_05xxx/2504.05979/images/e63aaf3e7583e329733bdf0d0086b196c266732e65fcc124b54b13a2ae6da734.jpg b/data/2025/2504_05xxx/2504.05979/images/e63aaf3e7583e329733bdf0d0086b196c266732e65fcc124b54b13a2ae6da734.jpg new file mode 100644 index 0000000000000000000000000000000000000000..518324067131359381e35b5ad698ce8ed5a7e747 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e63aaf3e7583e329733bdf0d0086b196c266732e65fcc124b54b13a2ae6da734.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3d32194c02ac1fe54188e852d62fbf5ec328ea8ddaf8d16eca3e3672cfc6bf0 +size 22175 diff --git a/data/2025/2504_05xxx/2504.05979/images/e6a106a5ad2b6d2b4ff973ae0d66eb902d883da385dea0f75e0dbef3aaddd4b0.jpg b/data/2025/2504_05xxx/2504.05979/images/e6a106a5ad2b6d2b4ff973ae0d66eb902d883da385dea0f75e0dbef3aaddd4b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a9ac3f109e80830e9bfe17eb53be6c984b5b407 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e6a106a5ad2b6d2b4ff973ae0d66eb902d883da385dea0f75e0dbef3aaddd4b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:779ee8d6612a9ad4d9aa04750477d2a30bdd4779b36d55c3c0aa714c72c6c88f +size 11136 diff --git a/data/2025/2504_05xxx/2504.05979/images/e6d59fa9484b3877034764c9fa8752e51c78a7b4eba2a3dd224cdb9236f23368.jpg b/data/2025/2504_05xxx/2504.05979/images/e6d59fa9484b3877034764c9fa8752e51c78a7b4eba2a3dd224cdb9236f23368.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7281ba3a33d06a7675321e24054da3417edbad49 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e6d59fa9484b3877034764c9fa8752e51c78a7b4eba2a3dd224cdb9236f23368.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:59671f0315dbefd4c7a8393766a1189a9d9af4ea14b516c7a2cd5d46610de212 +size 11624 diff --git a/data/2025/2504_05xxx/2504.05979/images/e6edd2726b0a0b32b22ba7d28c24eb54d68706defe2d03d01109b73577922099.jpg b/data/2025/2504_05xxx/2504.05979/images/e6edd2726b0a0b32b22ba7d28c24eb54d68706defe2d03d01109b73577922099.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9a69f6b8f121d1dc9eb1bae3599b65cace843b76 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e6edd2726b0a0b32b22ba7d28c24eb54d68706defe2d03d01109b73577922099.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7c943a0d3c5b2a3b80a661e1041c790826998b5a77c13286046170ff234a07b +size 11046 diff --git a/data/2025/2504_05xxx/2504.05979/images/e70d73a385cda6554925808b97816d9c1e848d834e5052992231cc4954cfd336.jpg b/data/2025/2504_05xxx/2504.05979/images/e70d73a385cda6554925808b97816d9c1e848d834e5052992231cc4954cfd336.jpg new file mode 100644 index 0000000000000000000000000000000000000000..275e44f7eb4799a87e4e6a77756c403e0b8c3702 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e70d73a385cda6554925808b97816d9c1e848d834e5052992231cc4954cfd336.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a194aea5d434da2598becf3976cfc8539f240477b78cf9666970fb5563ca179b +size 10567 diff --git a/data/2025/2504_05xxx/2504.05979/images/e739ed6e7d2e595e03926be7ac6c7b1fb3bd3badc49f246dca21b01655f93b1c.jpg b/data/2025/2504_05xxx/2504.05979/images/e739ed6e7d2e595e03926be7ac6c7b1fb3bd3badc49f246dca21b01655f93b1c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec5ee1e2bd94fcd31b34239f3d0703909d4e06c1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e739ed6e7d2e595e03926be7ac6c7b1fb3bd3badc49f246dca21b01655f93b1c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fe810a3306c461a2a33ca6350dcb944e09bd919d14ee7a12c45c9e03f704da2 +size 9067 diff --git a/data/2025/2504_05xxx/2504.05979/images/e7467303dc6f9aead4b04680ab549cd4c9987da28a7b8e07f93928c3cc115828.jpg b/data/2025/2504_05xxx/2504.05979/images/e7467303dc6f9aead4b04680ab549cd4c9987da28a7b8e07f93928c3cc115828.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a34fe77e3ac9159227df995e50c305d60430a067 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e7467303dc6f9aead4b04680ab549cd4c9987da28a7b8e07f93928c3cc115828.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f41d9ad6504d6130910e6ae506e36d42523ebd9d9aa85002e816368a804e16d5 +size 8617 diff --git a/data/2025/2504_05xxx/2504.05979/images/e7da05826e2092da3f8c786eaba5c6a6330ab3558f6720c639f754e67843897b.jpg b/data/2025/2504_05xxx/2504.05979/images/e7da05826e2092da3f8c786eaba5c6a6330ab3558f6720c639f754e67843897b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94ed8300d40c9ea3776f92723c2c0f62a6043a1d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e7da05826e2092da3f8c786eaba5c6a6330ab3558f6720c639f754e67843897b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48cbe48bfd7518c001c02fa1a1742e21bccbffec43092012cc1b350c9e31446f +size 10954 diff --git a/data/2025/2504_05xxx/2504.05979/images/e80a0e37e52eb35595a7df3a6aa08948e175cfb951ba4bd65e8b98ab1887d928.jpg b/data/2025/2504_05xxx/2504.05979/images/e80a0e37e52eb35595a7df3a6aa08948e175cfb951ba4bd65e8b98ab1887d928.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e22e60a80a96ed2cfb5cedca4033c2fb4396c995 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/e80a0e37e52eb35595a7df3a6aa08948e175cfb951ba4bd65e8b98ab1887d928.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3eddb7873a190832553e62ecf43401b6dc0039f1648f420ae25667d728c28a61 +size 10657 diff --git a/data/2025/2504_05xxx/2504.05979/images/e822d81326618b0d941cb4c3271bef0fd0548efa0ef06244f8b36e4b001ccf7a.jpg b/data/2025/2504_05xxx/2504.05979/images/e822d81326618b0d941cb4c3271bef0fd0548efa0ef06244f8b36e4b001ccf7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..60b3f6086cf9288fdbbef775e8b79f83db5b35a3 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e822d81326618b0d941cb4c3271bef0fd0548efa0ef06244f8b36e4b001ccf7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2a00e50d09884bab83d19224f70392ea847708a14e0498a2f270de601f1681a +size 11520 diff --git a/data/2025/2504_05xxx/2504.05979/images/e8ab46f36bede74d3eb3841233ac4c2a6eb5643b1d97d4921f99dd2c221aef40.jpg b/data/2025/2504_05xxx/2504.05979/images/e8ab46f36bede74d3eb3841233ac4c2a6eb5643b1d97d4921f99dd2c221aef40.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ab038ffd6bd1e013ad6450dec4ddbcbcc96cceb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e8ab46f36bede74d3eb3841233ac4c2a6eb5643b1d97d4921f99dd2c221aef40.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2268ef79af9d06d6410f5bd0d40eac55a72256f074f0135b9e7729f826230b5 +size 8141 diff --git a/data/2025/2504_05xxx/2504.05979/images/e8c0506441fcc7c04d242543b53f3d7af01afae300d6edb7c5571fbe34319a8c.jpg b/data/2025/2504_05xxx/2504.05979/images/e8c0506441fcc7c04d242543b53f3d7af01afae300d6edb7c5571fbe34319a8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d4d9756f140520c067975b7f3a675cc254bdefa --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e8c0506441fcc7c04d242543b53f3d7af01afae300d6edb7c5571fbe34319a8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32e7f899a229eaae9e96448304b72a5eaf7048d3bbde4f1b333a2a381fb5caa1 +size 9371 diff --git a/data/2025/2504_05xxx/2504.05979/images/e914339e66bb3680ffe4e499cab448aa16a263e1ab23c54fac562df95422444c.jpg b/data/2025/2504_05xxx/2504.05979/images/e914339e66bb3680ffe4e499cab448aa16a263e1ab23c54fac562df95422444c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99d433a40e9d091fd2a2532d9e5a5facd502ca56 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e914339e66bb3680ffe4e499cab448aa16a263e1ab23c54fac562df95422444c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0a2fd889c8d7b5e303e87f47a322692bb55da631c1eee9ebae0a7984a5a355d +size 10570 diff --git a/data/2025/2504_05xxx/2504.05979/images/e9288a0c02092fd53245823396ca1f76b2df5abca1ee697a871b5004f18dce58.jpg b/data/2025/2504_05xxx/2504.05979/images/e9288a0c02092fd53245823396ca1f76b2df5abca1ee697a871b5004f18dce58.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e36e140fe69e1c0cce632ef05aa153accaab89ed --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e9288a0c02092fd53245823396ca1f76b2df5abca1ee697a871b5004f18dce58.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f1c9a5a551c7958ab5154ebcbf9cedb0343bc87ab1eadc8941735839151d3ab +size 3074 diff --git a/data/2025/2504_05xxx/2504.05979/images/e960d89a9332326f5fac58239c1d5784d9e237d7ccede93ea1c82462b3f589b3.jpg 
b/data/2025/2504_05xxx/2504.05979/images/e960d89a9332326f5fac58239c1d5784d9e237d7ccede93ea1c82462b3f589b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd7ccbae6299eb46ffd7ffcb38ae197cce12f792 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e960d89a9332326f5fac58239c1d5784d9e237d7ccede93ea1c82462b3f589b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b64cff11d42be474989a36e1efdfb15208f34cf592f6d6b6e37be0c73cbfffb7 +size 992 diff --git a/data/2025/2504_05xxx/2504.05979/images/e9707ee40f1e21949b575b798c93018191eaeb0953a7978ef48d77face6376a1.jpg b/data/2025/2504_05xxx/2504.05979/images/e9707ee40f1e21949b575b798c93018191eaeb0953a7978ef48d77face6376a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..070d2cdd364287296c0a3ec749899fe81d6e06f9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e9707ee40f1e21949b575b798c93018191eaeb0953a7978ef48d77face6376a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20efcf81c075d41f04d65c69753f51ba48f7b5e48481fe80e4b6da647144d816 +size 9865 diff --git a/data/2025/2504_05xxx/2504.05979/images/e9ec594631498e2a1f8e661bedae31c443f9b190e1be8f521a61188f4d8f033b.jpg b/data/2025/2504_05xxx/2504.05979/images/e9ec594631498e2a1f8e661bedae31c443f9b190e1be8f521a61188f4d8f033b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b93450c8a3fbfb01e55e9140381d893519457d3e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/e9ec594631498e2a1f8e661bedae31c443f9b190e1be8f521a61188f4d8f033b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78a9cb39552da7748a7d79a7a808745ded1ea255958310aa8aab4a965cd9c7a3 +size 5939 diff --git a/data/2025/2504_05xxx/2504.05979/images/ea1923c868229c788db11eb4c4c0a0c7fdfff3244b7867c71f36ecc71509d140.jpg b/data/2025/2504_05xxx/2504.05979/images/ea1923c868229c788db11eb4c4c0a0c7fdfff3244b7867c71f36ecc71509d140.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d943de20bc51c33ad11f79fff0858bbb9c464ad6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ea1923c868229c788db11eb4c4c0a0c7fdfff3244b7867c71f36ecc71509d140.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57e00b1364dfd1a344555493101443748ad2a9f8a7771f08e036498775a9a96b +size 23102 diff --git a/data/2025/2504_05xxx/2504.05979/images/ea5e48f5561ad6c4fe1193a647524607970cfe7cdc63cd079944cf82880cecfe.jpg b/data/2025/2504_05xxx/2504.05979/images/ea5e48f5561ad6c4fe1193a647524607970cfe7cdc63cd079944cf82880cecfe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25676a810a6a104ee7e995948411418e8a66cec9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ea5e48f5561ad6c4fe1193a647524607970cfe7cdc63cd079944cf82880cecfe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:093f64e542d465b1157317abd3bd67597e07a670eaf48ac2b5d579cc7cb70029 +size 3694 diff --git a/data/2025/2504_05xxx/2504.05979/images/eabf80aab7589d05f040c765b5655db13fd6ccc0f85020e3be5daad50092f0f9.jpg b/data/2025/2504_05xxx/2504.05979/images/eabf80aab7589d05f040c765b5655db13fd6ccc0f85020e3be5daad50092f0f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47c9b36b8251c5477e135def5334e0790e1333cb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/eabf80aab7589d05f040c765b5655db13fd6ccc0f85020e3be5daad50092f0f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:acd81c87e90e8546b8b3f11a9fbdfaf40cad0a97ef006ad379423993ad26f4a3 +size 11837 diff --git a/data/2025/2504_05xxx/2504.05979/images/eae0730d55066371b3326e9ed3ab31f01ca11d2267e31b1a8c17fc7663f2714f.jpg b/data/2025/2504_05xxx/2504.05979/images/eae0730d55066371b3326e9ed3ab31f01ca11d2267e31b1a8c17fc7663f2714f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6393441998597f0f779e171d763a44edacb4a7e7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/eae0730d55066371b3326e9ed3ab31f01ca11d2267e31b1a8c17fc7663f2714f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f3f42b63f2d10f1f305e50930102fb5be69a28ad53ce732e7f97ca45f4b4454 +size 10296 diff --git a/data/2025/2504_05xxx/2504.05979/images/eb143f4361a33ce174515f230c11a216ac7590d56a2fc328a2adc862245f0ce7.jpg b/data/2025/2504_05xxx/2504.05979/images/eb143f4361a33ce174515f230c11a216ac7590d56a2fc328a2adc862245f0ce7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a000a09b633fa7b9cde30fe481ea8ae67b16f56a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/eb143f4361a33ce174515f230c11a216ac7590d56a2fc328a2adc862245f0ce7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5ce617ca9a3be5cb4fee6d5143759d8b4d1cdffcf0974881dcb30efd2b68790 +size 19909 diff --git a/data/2025/2504_05xxx/2504.05979/images/eb16bd445147255cf3980142ad981069b415244e78b0a5cc390ff3c9e7af61b2.jpg b/data/2025/2504_05xxx/2504.05979/images/eb16bd445147255cf3980142ad981069b415244e78b0a5cc390ff3c9e7af61b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5232cbede7002f2243b0ee52c669aa19c92521f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/eb16bd445147255cf3980142ad981069b415244e78b0a5cc390ff3c9e7af61b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b686fcccbc550402788dec38084252bb0c1af2f7fb527f3ac53a54f7ddd0cc28 +size 13906 diff --git a/data/2025/2504_05xxx/2504.05979/images/eb86efcba3a9c240da3e722bda20727de1f615bfe422fab64ff3b091cd85df06.jpg b/data/2025/2504_05xxx/2504.05979/images/eb86efcba3a9c240da3e722bda20727de1f615bfe422fab64ff3b091cd85df06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9413087dca07a4b61dc846fea20064ca3f3ce62f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/eb86efcba3a9c240da3e722bda20727de1f615bfe422fab64ff3b091cd85df06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb43bafa9e2a3690e53c01264e64e8a209244cf672050d31057708d1bd3a0de3 +size 4811 diff --git a/data/2025/2504_05xxx/2504.05979/images/eba5be0e2a33e5791d2df2eae6ba493b1daf01dd71d6fba92adff187a344d471.jpg b/data/2025/2504_05xxx/2504.05979/images/eba5be0e2a33e5791d2df2eae6ba493b1daf01dd71d6fba92adff187a344d471.jpg new file mode 100644 index 0000000000000000000000000000000000000000..877e77bcd1ea975cbb6fe6bb667635fd071d2b4a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/eba5be0e2a33e5791d2df2eae6ba493b1daf01dd71d6fba92adff187a344d471.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95d1f5d5268a3f8e618793e0859adfb2a7a1d0af4b8d4567386871e5c84e8da8 +size 17201 diff --git a/data/2025/2504_05xxx/2504.05979/images/ec488303c09dbf9dfb728fd1952368c1c3fa85f708e3b290ecd1a36b50ef4803.jpg b/data/2025/2504_05xxx/2504.05979/images/ec488303c09dbf9dfb728fd1952368c1c3fa85f708e3b290ecd1a36b50ef4803.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79817633205438ee15eb95f8d582d6286b4960b4 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/ec488303c09dbf9dfb728fd1952368c1c3fa85f708e3b290ecd1a36b50ef4803.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8553f70d7d21d85c53caabf50d4866ad6cbe5a77705efbf2d18a4e89e4ede8c0 +size 12856 diff --git a/data/2025/2504_05xxx/2504.05979/images/ecba2242bac62c8676019c58e03ec1fc8ba9f66e7431827caf98cf3565ae7429.jpg b/data/2025/2504_05xxx/2504.05979/images/ecba2242bac62c8676019c58e03ec1fc8ba9f66e7431827caf98cf3565ae7429.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a90ee1e93c23e400344185cf82d76a2871dc4b8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ecba2242bac62c8676019c58e03ec1fc8ba9f66e7431827caf98cf3565ae7429.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf2d6f87bf440bc986865080f932fd91124b70d1ccb1b0398de6bec52858dfaf +size 11345 diff --git a/data/2025/2504_05xxx/2504.05979/images/ecbf422861a6f3acb1bf2fa0c11269caa5fa86090bdbdd18230b1dcaaeb183d8.jpg b/data/2025/2504_05xxx/2504.05979/images/ecbf422861a6f3acb1bf2fa0c11269caa5fa86090bdbdd18230b1dcaaeb183d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..171f9b3a20e5fb675d99fa223b346ad934c3ad87 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ecbf422861a6f3acb1bf2fa0c11269caa5fa86090bdbdd18230b1dcaaeb183d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46813c74c356424e0b03e59974918c8cabdf50c30595c70b5c93688ac6c04864 +size 8999 diff --git a/data/2025/2504_05xxx/2504.05979/images/ecc01e85a79c76a542d72a3c3811916efd3af7089be84ce35914d93646b60cbd.jpg b/data/2025/2504_05xxx/2504.05979/images/ecc01e85a79c76a542d72a3c3811916efd3af7089be84ce35914d93646b60cbd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d15ce738b2ad6d5d61ac99cd3b7205aa5126d8e7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ecc01e85a79c76a542d72a3c3811916efd3af7089be84ce35914d93646b60cbd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db17a0d4bae5c20be7e1f9593109741e3dd5742ba301309b7a0455a4ee161fe3 +size 6008 diff --git a/data/2025/2504_05xxx/2504.05979/images/ed7f35bce8d09ae0a591d4b7e14edd5bbe5913c727af2763219ee8eccf546612.jpg b/data/2025/2504_05xxx/2504.05979/images/ed7f35bce8d09ae0a591d4b7e14edd5bbe5913c727af2763219ee8eccf546612.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0700cb0880b17a9e8baf084822dbbf6395796afc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ed7f35bce8d09ae0a591d4b7e14edd5bbe5913c727af2763219ee8eccf546612.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:397403425ce0bf0f745a470ce3265043ef071f5b41d68bd6037b20c6842fd246 +size 14758 diff --git a/data/2025/2504_05xxx/2504.05979/images/ee337d58007e700256aa61fb4d68f998f7a15c4a2662d8db4f8d622da1b1e1fb.jpg b/data/2025/2504_05xxx/2504.05979/images/ee337d58007e700256aa61fb4d68f998f7a15c4a2662d8db4f8d622da1b1e1fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b35ca3db259632bf356106c61d31c17702257759 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ee337d58007e700256aa61fb4d68f998f7a15c4a2662d8db4f8d622da1b1e1fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87bf54537c9839e4434dc0fad4c4b622b5682f3860e867138d23ed2fa1bf900d +size 9115 diff --git a/data/2025/2504_05xxx/2504.05979/images/ee95015664fc94518401d21a2920ffd79e12d1ce90aaee7e7c2e23d8fb252353.jpg 
b/data/2025/2504_05xxx/2504.05979/images/ee95015664fc94518401d21a2920ffd79e12d1ce90aaee7e7c2e23d8fb252353.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01acbb7d4f3d1f4b3bebf3de846a71c203129735 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ee95015664fc94518401d21a2920ffd79e12d1ce90aaee7e7c2e23d8fb252353.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e3c888d4257a856630a10f93ccb19b44a689217a8d08670ca586905342cb4e1 +size 8502 diff --git a/data/2025/2504_05xxx/2504.05979/images/ef1d1bd4564e7e63c547c011879fd964bcadd7553b251ad9931c404cc35e496b.jpg b/data/2025/2504_05xxx/2504.05979/images/ef1d1bd4564e7e63c547c011879fd964bcadd7553b251ad9931c404cc35e496b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..edf9646cbee34bc223c089a663704adc3dcd1cfb --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ef1d1bd4564e7e63c547c011879fd964bcadd7553b251ad9931c404cc35e496b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a95913b9be899fa2b1166e7a3d2176aa41620926db1f2023825b06c5d4146be +size 11499 diff --git a/data/2025/2504_05xxx/2504.05979/images/ef305485daf2aa15d9580d3579fba6ec68a2f97825b971e73b7dda80fc7b27d3.jpg b/data/2025/2504_05xxx/2504.05979/images/ef305485daf2aa15d9580d3579fba6ec68a2f97825b971e73b7dda80fc7b27d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..150855083b0ef3a804d9af865f163160dcb53978 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ef305485daf2aa15d9580d3579fba6ec68a2f97825b971e73b7dda80fc7b27d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6d82c56794385428573da61693f61c9b9105480a730dce9296e67607701babf +size 8169 diff --git a/data/2025/2504_05xxx/2504.05979/images/ef4e183b7dfd1b0ebe989bb80028076acf9467e603aef017798bb91bb385489e.jpg b/data/2025/2504_05xxx/2504.05979/images/ef4e183b7dfd1b0ebe989bb80028076acf9467e603aef017798bb91bb385489e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..895eacc51e54c521bf98a893bf24c18148a7cc40 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/ef4e183b7dfd1b0ebe989bb80028076acf9467e603aef017798bb91bb385489e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3297a32d85734a96ce83d37eed752082dbd389846bdbdd90dce4e2727a256a1 +size 13723 diff --git a/data/2025/2504_05xxx/2504.05979/images/eff06e3cb3442fb9e89cec40098e50870a9ded39e1f69753076da696882aa86d.jpg b/data/2025/2504_05xxx/2504.05979/images/eff06e3cb3442fb9e89cec40098e50870a9ded39e1f69753076da696882aa86d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b258d3c82f949cde7851581e8f4a51358f567bc6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/eff06e3cb3442fb9e89cec40098e50870a9ded39e1f69753076da696882aa86d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08463a7c0076f6c5c0950ec26acb72ec6446f7020c58dd147fa92e420d2acff0 +size 14733 diff --git a/data/2025/2504_05xxx/2504.05979/images/f00108f1137a655c41931a3eca3d59fb3d37c83f487aa21775f7ef258746a7e5.jpg b/data/2025/2504_05xxx/2504.05979/images/f00108f1137a655c41931a3eca3d59fb3d37c83f487aa21775f7ef258746a7e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b4a70389b373721523493217f545c436445463c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f00108f1137a655c41931a3eca3d59fb3d37c83f487aa21775f7ef258746a7e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:92a6744108f9f1d7a5bfee4ec6c17b6e1926ee83770a1f1239ab5f02714ed182 +size 7702 diff --git a/data/2025/2504_05xxx/2504.05979/images/f016fb55ce2f2b9f48a4d23f36970310500e1250abf50d5e88f1902507f424c6.jpg b/data/2025/2504_05xxx/2504.05979/images/f016fb55ce2f2b9f48a4d23f36970310500e1250abf50d5e88f1902507f424c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e922dbd00613568406170c757cfe4a1db40e70dd --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f016fb55ce2f2b9f48a4d23f36970310500e1250abf50d5e88f1902507f424c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d47d2379d66c0b24d85a994e005049d265bfcce4ba3e52d3d772c5196b5ad73 +size 15018 diff --git a/data/2025/2504_05xxx/2504.05979/images/f0852ac207fe5ce1fa4118aefca299ece81ad07b3b1dabb16edd46a65c5a542c.jpg b/data/2025/2504_05xxx/2504.05979/images/f0852ac207fe5ce1fa4118aefca299ece81ad07b3b1dabb16edd46a65c5a542c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49ecc91f964bfa475d94747eccbb9a5324fb1dad --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f0852ac207fe5ce1fa4118aefca299ece81ad07b3b1dabb16edd46a65c5a542c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e8c88eb9ff2b0d0e15c3e047ef24d5579d1a59e3acf5ee2f43f9cf73720155b +size 9383 diff --git a/data/2025/2504_05xxx/2504.05979/images/f13c64cd986d4479510b7a4819c3393ff74c88928cb3d8ab9642dde087405fb8.jpg b/data/2025/2504_05xxx/2504.05979/images/f13c64cd986d4479510b7a4819c3393ff74c88928cb3d8ab9642dde087405fb8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b13d60d92bf2e2fdacd11579444e06d8d6a424a1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f13c64cd986d4479510b7a4819c3393ff74c88928cb3d8ab9642dde087405fb8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4db2c3428befaa462ba992f01a242a56f1c9d388ff4568cb931e8f40d45c012a +size 9470 diff --git a/data/2025/2504_05xxx/2504.05979/images/f1c73606a27398749d1cdb7f461d7eb75573cba818d1bde958bce44ebacb6dbd.jpg b/data/2025/2504_05xxx/2504.05979/images/f1c73606a27398749d1cdb7f461d7eb75573cba818d1bde958bce44ebacb6dbd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0065bf7707b6d0919ce58383d0e094517fe1f61 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f1c73606a27398749d1cdb7f461d7eb75573cba818d1bde958bce44ebacb6dbd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e91e94061f79a4f6b3dec25c2a88eb707c3db588ce6324ec254662b7f4187b5d +size 13296 diff --git a/data/2025/2504_05xxx/2504.05979/images/f1f8095ce03c55845db1331630f40c1812ffda931409ba5f89385376c17bfa67.jpg b/data/2025/2504_05xxx/2504.05979/images/f1f8095ce03c55845db1331630f40c1812ffda931409ba5f89385376c17bfa67.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52d6534f8ce1feefdd7ce2a31283e666f01075fa --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f1f8095ce03c55845db1331630f40c1812ffda931409ba5f89385376c17bfa67.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91878560149a37f2af7798ebefa4410d966bfa14ff7fd73d9df5fa4293accfe3 +size 8783 diff --git a/data/2025/2504_05xxx/2504.05979/images/f206c5532d96bfcc1cf723bce40c254819c6dcc8f7c3dab25bd20471924a9c5d.jpg b/data/2025/2504_05xxx/2504.05979/images/f206c5532d96bfcc1cf723bce40c254819c6dcc8f7c3dab25bd20471924a9c5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c450a2f36b6f7e6dd86cf711bb545540e8aed058 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/f206c5532d96bfcc1cf723bce40c254819c6dcc8f7c3dab25bd20471924a9c5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1cf3696277382e706d2f3bcd9d801061212aefc0d6e6aa2f8c3e522e137764a +size 9967 diff --git a/data/2025/2504_05xxx/2504.05979/images/f2338a3386d7dafb1d4ed2f0d097545a66aaae303a0ec9c364235ccd56f6fb11.jpg b/data/2025/2504_05xxx/2504.05979/images/f2338a3386d7dafb1d4ed2f0d097545a66aaae303a0ec9c364235ccd56f6fb11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5676ae98a41de94cdf85ad186fd4e63f67599b5d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f2338a3386d7dafb1d4ed2f0d097545a66aaae303a0ec9c364235ccd56f6fb11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79ade91414c56697a7915dfd344c3afb565efd8697be9fa581fe2e51a07af39c +size 12545 diff --git a/data/2025/2504_05xxx/2504.05979/images/f266fde975c8808d3f3339c241e64e53840be40a286abf4b782d7dec9c606c51.jpg b/data/2025/2504_05xxx/2504.05979/images/f266fde975c8808d3f3339c241e64e53840be40a286abf4b782d7dec9c606c51.jpg new file mode 100644 index 0000000000000000000000000000000000000000..982167833ba5e5089c41a22b5522b4ff84c92cf5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f266fde975c8808d3f3339c241e64e53840be40a286abf4b782d7dec9c606c51.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaf284228cb3c160767f7222494d2c06e57258e094d611a23b43ef625ae46f29 +size 7601 diff --git a/data/2025/2504_05xxx/2504.05979/images/f2a66d298cf6681e639f60487d91eb2376e9527b2385d109001392557fd3fc3c.jpg b/data/2025/2504_05xxx/2504.05979/images/f2a66d298cf6681e639f60487d91eb2376e9527b2385d109001392557fd3fc3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3547d1519c6a456b798977d32e7c5f88d09a33b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f2a66d298cf6681e639f60487d91eb2376e9527b2385d109001392557fd3fc3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71da449ec62373e7ad582344b1b65770e6078e2ad2a9748ec1ec98a42ecff90d +size 17029 diff --git a/data/2025/2504_05xxx/2504.05979/images/f333d581ad570f753e99a7e73c99d827c89d746ae6f1f549c53cf30cf7e7ea33.jpg b/data/2025/2504_05xxx/2504.05979/images/f333d581ad570f753e99a7e73c99d827c89d746ae6f1f549c53cf30cf7e7ea33.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62aec06f6305df770bfb817f7e3d2e520122f283 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f333d581ad570f753e99a7e73c99d827c89d746ae6f1f549c53cf30cf7e7ea33.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8dc9fd41d74bee13694d37e684766835415a3fe28c483fcbe112a9c0e57c9f2 +size 12683 diff --git a/data/2025/2504_05xxx/2504.05979/images/f36d385ab421fbf6d5de21b9a66c5a418a213e03c378e1602b8431ffa1b78dec.jpg b/data/2025/2504_05xxx/2504.05979/images/f36d385ab421fbf6d5de21b9a66c5a418a213e03c378e1602b8431ffa1b78dec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2dc9b9dac30efd668c80e38d428191359bea5f18 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f36d385ab421fbf6d5de21b9a66c5a418a213e03c378e1602b8431ffa1b78dec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ef61ad928d9cd58a38b2fc034b2dc328d5426be2b583a9a2e6342891314ce02 +size 10692 diff --git a/data/2025/2504_05xxx/2504.05979/images/f4667249d38b5a17f284e530b43ccc19dae4b4df74a383b000d041b25e9cc575.jpg 
b/data/2025/2504_05xxx/2504.05979/images/f4667249d38b5a17f284e530b43ccc19dae4b4df74a383b000d041b25e9cc575.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a195f7ea213f06a2206332912b4530d3ecd1f440 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f4667249d38b5a17f284e530b43ccc19dae4b4df74a383b000d041b25e9cc575.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afbc596a0d927483221c48ff354cad574576b2fd284110e43d0e62ac0fa9cc12 +size 7260 diff --git a/data/2025/2504_05xxx/2504.05979/images/f4ad4ea5d8987493e7a7c6a58f347e56802ce312612d8ea2a144cda053cdf994.jpg b/data/2025/2504_05xxx/2504.05979/images/f4ad4ea5d8987493e7a7c6a58f347e56802ce312612d8ea2a144cda053cdf994.jpg new file mode 100644 index 0000000000000000000000000000000000000000..955b11490a003fa31ab9601acc92e0b4f2c91b2a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f4ad4ea5d8987493e7a7c6a58f347e56802ce312612d8ea2a144cda053cdf994.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8e8fb1d02462a99ee9a0fea1b548f11c9a05570f94e6947788d578a99f7128f +size 5318 diff --git a/data/2025/2504_05xxx/2504.05979/images/f4cd9c8e039a64fce1592ca2bb40e8f9f9bf8f8c530f65984c76ff6708a91c6f.jpg b/data/2025/2504_05xxx/2504.05979/images/f4cd9c8e039a64fce1592ca2bb40e8f9f9bf8f8c530f65984c76ff6708a91c6f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fee9f1407bd5aadb7b0e411b7437221b45ce6539 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f4cd9c8e039a64fce1592ca2bb40e8f9f9bf8f8c530f65984c76ff6708a91c6f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10d7ee92504180e40d6952e2b05e3f09103abeeb9e402ada55363fdfd617553b +size 14623 diff --git a/data/2025/2504_05xxx/2504.05979/images/f550448a4b54f7ddaffc6aa7b44e8f05342e71009989a87f527ce649b3de480a.jpg b/data/2025/2504_05xxx/2504.05979/images/f550448a4b54f7ddaffc6aa7b44e8f05342e71009989a87f527ce649b3de480a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59fa449dd39f07c06254721ea9fa63dc11ca62df --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f550448a4b54f7ddaffc6aa7b44e8f05342e71009989a87f527ce649b3de480a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f732ab4c954bfa4e3439431c087baf7106e8d00e8cb37cdf9dc3988948248c2d +size 9368 diff --git a/data/2025/2504_05xxx/2504.05979/images/f58d9105160e737da34562b6c9c09dca8743d388c9ac9c2f0b8894657da4a521.jpg b/data/2025/2504_05xxx/2504.05979/images/f58d9105160e737da34562b6c9c09dca8743d388c9ac9c2f0b8894657da4a521.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0453f59a774f59d6917f1570681b5d8285115e72 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f58d9105160e737da34562b6c9c09dca8743d388c9ac9c2f0b8894657da4a521.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:200eb640c35e1ebf11a140e2d996c66f0e2adf2cfe87e7edc3be7386a220ba9c +size 16655 diff --git a/data/2025/2504_05xxx/2504.05979/images/f596fda615ad13c09d3de7a59c67e2adf0d99aa3880879a7dfca06b8ddd317e8.jpg b/data/2025/2504_05xxx/2504.05979/images/f596fda615ad13c09d3de7a59c67e2adf0d99aa3880879a7dfca06b8ddd317e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e921085e7de2e64a4ec457f933c855e4aa2beeae --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f596fda615ad13c09d3de7a59c67e2adf0d99aa3880879a7dfca06b8ddd317e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d7e1ef653701c9e1797e4c37a78bb98e9cee9833592f14510a9159af6d6e16e5 +size 20287 diff --git a/data/2025/2504_05xxx/2504.05979/images/f5a97061e0c642e8ad66652a68547da7ce296666b9869a78d2e0233544c9a488.jpg b/data/2025/2504_05xxx/2504.05979/images/f5a97061e0c642e8ad66652a68547da7ce296666b9869a78d2e0233544c9a488.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26e9e2f150281c6983a724c7d960ca090a6de616 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f5a97061e0c642e8ad66652a68547da7ce296666b9869a78d2e0233544c9a488.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:100e4cb374717607cd497ac235a7b8adaef7fa693fee23e8b79857409bbbdd8e +size 15361 diff --git a/data/2025/2504_05xxx/2504.05979/images/f68ac7b3f9c3b4527403386e94ea70946346ec05ae2dde329bd666632c16d005.jpg b/data/2025/2504_05xxx/2504.05979/images/f68ac7b3f9c3b4527403386e94ea70946346ec05ae2dde329bd666632c16d005.jpg new file mode 100644 index 0000000000000000000000000000000000000000..229e207d380f73df1a87a2fbebfce5eec94b652a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f68ac7b3f9c3b4527403386e94ea70946346ec05ae2dde329bd666632c16d005.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dec567c89b32e619946acf4bd7c69c5994a43aef5b30c0da86784b835dd6336 +size 11687 diff --git a/data/2025/2504_05xxx/2504.05979/images/f69e3b0379013765f2adbc1aca797b0298039407cd2b3e804055f8aa7b5d7129.jpg b/data/2025/2504_05xxx/2504.05979/images/f69e3b0379013765f2adbc1aca797b0298039407cd2b3e804055f8aa7b5d7129.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7de00bc82b422d3d74a55d47c3fa0b953b3de201 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f69e3b0379013765f2adbc1aca797b0298039407cd2b3e804055f8aa7b5d7129.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e6bd77f0347c1814bb24eae226a160031be80d02deb8b2390aa47391c1959d7 +size 5784 diff --git a/data/2025/2504_05xxx/2504.05979/images/f723f0fa1807616a1f9fc4a326c92c0dbc8d41bfa409fc55bcd69f03cf0e4cc4.jpg b/data/2025/2504_05xxx/2504.05979/images/f723f0fa1807616a1f9fc4a326c92c0dbc8d41bfa409fc55bcd69f03cf0e4cc4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f376ca27974b1a59aa53c4ddc27d08f37d3b3e0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f723f0fa1807616a1f9fc4a326c92c0dbc8d41bfa409fc55bcd69f03cf0e4cc4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51e313ba9505277f870ffdac7cb5e7c8940a94f12960ae4077b369228f9aa9e6 +size 8211 diff --git a/data/2025/2504_05xxx/2504.05979/images/f735b915cfa2ba7a85ad7998eea453cc54fd14b2e904d305b4cea528c2185f2a.jpg b/data/2025/2504_05xxx/2504.05979/images/f735b915cfa2ba7a85ad7998eea453cc54fd14b2e904d305b4cea528c2185f2a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f1bcbc8ed09a169f5a3104bac4b6208dd8a4832b --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f735b915cfa2ba7a85ad7998eea453cc54fd14b2e904d305b4cea528c2185f2a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:148150fd640d1cb1ad791ffd4376a6cc3c9c7ca011175bce17500df41f8b651e +size 8717 diff --git a/data/2025/2504_05xxx/2504.05979/images/f748ae519d07deeba518763dcddb8fac9d6dd17225e417618ff62357c72bb0c3.jpg b/data/2025/2504_05xxx/2504.05979/images/f748ae519d07deeba518763dcddb8fac9d6dd17225e417618ff62357c72bb0c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0efb10313285242657c7fa12c0f732c769975cc2 --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/f748ae519d07deeba518763dcddb8fac9d6dd17225e417618ff62357c72bb0c3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28d588c0eba724b376af4137aec15d599af7b7c1f9443825d227e89761f337e9 +size 5095 diff --git a/data/2025/2504_05xxx/2504.05979/images/f7744cbca6f65d6b7b76f3ed15020d3fc9b6f8ed98f636028e5ced70384c52bd.jpg b/data/2025/2504_05xxx/2504.05979/images/f7744cbca6f65d6b7b76f3ed15020d3fc9b6f8ed98f636028e5ced70384c52bd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..355394438c774a200f50a171149679681b60cb72 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f7744cbca6f65d6b7b76f3ed15020d3fc9b6f8ed98f636028e5ced70384c52bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39b4facedc3105c828d81a5f2d9508975544e4853f02df8b05b4ba52ff4f4242 +size 10618 diff --git a/data/2025/2504_05xxx/2504.05979/images/f77b58aea411a06d091798fd19812f2d11b864849cf60dea1419dbb96bdeab4b.jpg b/data/2025/2504_05xxx/2504.05979/images/f77b58aea411a06d091798fd19812f2d11b864849cf60dea1419dbb96bdeab4b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ff95d133dc496ae8889fcd0aaa4612d6e5213d5 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f77b58aea411a06d091798fd19812f2d11b864849cf60dea1419dbb96bdeab4b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6890bc39d48c401f69f4a5b5735bee495164b62419609b73c4195728fca1fe8d +size 5302 diff --git a/data/2025/2504_05xxx/2504.05979/images/f7a549c39bdf0880255cebc34e0049aef8e949afe607ba2d7ca38743e5ee21e8.jpg b/data/2025/2504_05xxx/2504.05979/images/f7a549c39bdf0880255cebc34e0049aef8e949afe607ba2d7ca38743e5ee21e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5539d8c57d9f6b73977bcb920c296508698eb4f9 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f7a549c39bdf0880255cebc34e0049aef8e949afe607ba2d7ca38743e5ee21e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8433591dd913a893f0faefe209dd8a8aefcfdaff2cf645adb65d23e08064bda7 +size 8992 diff --git a/data/2025/2504_05xxx/2504.05979/images/f8056f09456f348f5dfebcc2e29caddc6eaffd73644a4b2e58c236fd4f647220.jpg b/data/2025/2504_05xxx/2504.05979/images/f8056f09456f348f5dfebcc2e29caddc6eaffd73644a4b2e58c236fd4f647220.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b803c21c168065256aa4a9acdecee7caccccd342 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f8056f09456f348f5dfebcc2e29caddc6eaffd73644a4b2e58c236fd4f647220.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ccc0f5509e6849414f808119bd6f4fe8b806a6381851bd05a11bca97e099154 +size 13733 diff --git a/data/2025/2504_05xxx/2504.05979/images/f835ef19b16a59831866bcf40d9d9bb5ac713048e65f176650e206cc3a2dd8ec.jpg b/data/2025/2504_05xxx/2504.05979/images/f835ef19b16a59831866bcf40d9d9bb5ac713048e65f176650e206cc3a2dd8ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04a42fe8b7580609b497df3a3409b72aa24e446f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f835ef19b16a59831866bcf40d9d9bb5ac713048e65f176650e206cc3a2dd8ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ce4cae1907c15fcaa1162c22a4f3ee93fe8eaf4975f5037b4b9b99ae9ab4148 +size 7859 diff --git a/data/2025/2504_05xxx/2504.05979/images/f8b04a681ef9c0a1c0fb445e1ea16e42cbc375d735002dad0a66db67be113463.jpg 
b/data/2025/2504_05xxx/2504.05979/images/f8b04a681ef9c0a1c0fb445e1ea16e42cbc375d735002dad0a66db67be113463.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6de28716e85870f094a9fe8e91ec3974cac3a8ed --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f8b04a681ef9c0a1c0fb445e1ea16e42cbc375d735002dad0a66db67be113463.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3bf1f1cc0f60c9948aa360331d2fa64e3a302679672667675e0f5f622fda897 +size 10227 diff --git a/data/2025/2504_05xxx/2504.05979/images/f90e61bd1f023e6f33aef6353e15aca3f50253a83831ca3012b01fddcbeee732.jpg b/data/2025/2504_05xxx/2504.05979/images/f90e61bd1f023e6f33aef6353e15aca3f50253a83831ca3012b01fddcbeee732.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6b27dfc96512a3628b7e2013857276deb874521 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f90e61bd1f023e6f33aef6353e15aca3f50253a83831ca3012b01fddcbeee732.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31d7b30ce518b929420e5cb4ad102e10a027abc736eb7cbf6641a2d7a004cf74 +size 5954 diff --git a/data/2025/2504_05xxx/2504.05979/images/f91d6cbc924b18c4e72f2efd1f2c0ffe7a85d740967c0f696474bad835204b3e.jpg b/data/2025/2504_05xxx/2504.05979/images/f91d6cbc924b18c4e72f2efd1f2c0ffe7a85d740967c0f696474bad835204b3e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06ba67f31fa1741dc1b098b198444aaf07d5db2f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f91d6cbc924b18c4e72f2efd1f2c0ffe7a85d740967c0f696474bad835204b3e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:451ad9bf2ba8aabc736e6c6bff66309b7fe0add335c414bbb6ebb793dbde0ab0 +size 14696 diff --git a/data/2025/2504_05xxx/2504.05979/images/f91e7c322f8dc4a9e7e58df1c8b1d035d289d156992fbfb80f834d5f06e3e4d2.jpg b/data/2025/2504_05xxx/2504.05979/images/f91e7c322f8dc4a9e7e58df1c8b1d035d289d156992fbfb80f834d5f06e3e4d2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8194ce3853f4e44357a3559707a1a649045614c7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f91e7c322f8dc4a9e7e58df1c8b1d035d289d156992fbfb80f834d5f06e3e4d2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03c6cc3ee95590125575c063c0d897494a6062502edff4c3c8c4d5b0b9ee6dae +size 11182 diff --git a/data/2025/2504_05xxx/2504.05979/images/f93d94cd33ec493cea22f6299c05435ef6df644e620ad03fc4906616201dd94b.jpg b/data/2025/2504_05xxx/2504.05979/images/f93d94cd33ec493cea22f6299c05435ef6df644e620ad03fc4906616201dd94b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c71d408a32330cb5dbf872c549e51252fd882402 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f93d94cd33ec493cea22f6299c05435ef6df644e620ad03fc4906616201dd94b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:241c5490a1329655102c01ae50b5f5fd1f87a0c0e295e3843ea45b06e4d467d8 +size 1021 diff --git a/data/2025/2504_05xxx/2504.05979/images/f93e5e0c608562728e8855f8158f8f8dfca8a94d4114bd194f042654bffe10c0.jpg b/data/2025/2504_05xxx/2504.05979/images/f93e5e0c608562728e8855f8158f8f8dfca8a94d4114bd194f042654bffe10c0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b551e80f1ef73006851c839a39f90c1fcd314e32 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f93e5e0c608562728e8855f8158f8f8dfca8a94d4114bd194f042654bffe10c0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:37a19725a77673b245916980452a2a0fbbbee4df16c4a5fd8d365f314ee1874e +size 8233 diff --git a/data/2025/2504_05xxx/2504.05979/images/f99ed011b160f34447b7db945e73c93bb19c1fb9a7888322a3f61e89fa0b9b59.jpg b/data/2025/2504_05xxx/2504.05979/images/f99ed011b160f34447b7db945e73c93bb19c1fb9a7888322a3f61e89fa0b9b59.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0025d467f8f1a1caa3d35ae9277544ad147d33e6 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/f99ed011b160f34447b7db945e73c93bb19c1fb9a7888322a3f61e89fa0b9b59.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c322f8a97caa80caaa40956123adc48117e47990c122b61763649ddaae8fe4e +size 10506 diff --git a/data/2025/2504_05xxx/2504.05979/images/fa73cd36deac40a3a5de512bdd640d92cbdd918be0feb815b9659870541d646f.jpg b/data/2025/2504_05xxx/2504.05979/images/fa73cd36deac40a3a5de512bdd640d92cbdd918be0feb815b9659870541d646f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a372911757b7b4c505386fff965a6974630d01e7 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fa73cd36deac40a3a5de512bdd640d92cbdd918be0feb815b9659870541d646f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35206bc45405bfdb14e4a0caa329b356e294446e9061ad14752f6a3486f46453 +size 15292 diff --git a/data/2025/2504_05xxx/2504.05979/images/faf4eca5a3720dff227ce6a4ad8f4898de848e4e08fec90adf87952d14446f7e.jpg b/data/2025/2504_05xxx/2504.05979/images/faf4eca5a3720dff227ce6a4ad8f4898de848e4e08fec90adf87952d14446f7e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb0ebfc35d0a2e42a54b84565b1bcd835f0338c1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/faf4eca5a3720dff227ce6a4ad8f4898de848e4e08fec90adf87952d14446f7e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:401e3ba11b3fe658632a37d2abacac805df11f91d802f29d1f3d23b870626987 +size 29573 diff --git a/data/2025/2504_05xxx/2504.05979/images/fafbcde737a7a7fc8337bc987984a76a03a10ed89b37798dab11796bc6e03f52.jpg b/data/2025/2504_05xxx/2504.05979/images/fafbcde737a7a7fc8337bc987984a76a03a10ed89b37798dab11796bc6e03f52.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aee8c3a6811d83c5ed63c9acc2ba13bd501931df --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fafbcde737a7a7fc8337bc987984a76a03a10ed89b37798dab11796bc6e03f52.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1f73a74be7128e74d5de016c4ff8637bbcc99d53b5816bcca47c552ddf9ada2 +size 12800 diff --git a/data/2025/2504_05xxx/2504.05979/images/fbb3f36e987742956eb393471e0d4822531b76d7cb4cd749008a8301e0d7abaf.jpg b/data/2025/2504_05xxx/2504.05979/images/fbb3f36e987742956eb393471e0d4822531b76d7cb4cd749008a8301e0d7abaf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8a5ad10408da631bc06da5f92fbedc2d2bec23e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fbb3f36e987742956eb393471e0d4822531b76d7cb4cd749008a8301e0d7abaf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:473241aa55b289d8d764abcdf51fbba2890d27590dba744b62ad81cc5cf0390c +size 10902 diff --git a/data/2025/2504_05xxx/2504.05979/images/fbc284435daba3dd588193455507d1744f38c42e568b7de9d51aacf24568bbd6.jpg b/data/2025/2504_05xxx/2504.05979/images/fbc284435daba3dd588193455507d1744f38c42e568b7de9d51aacf24568bbd6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a43de2040817f9681fa09848e13ab86e110155d --- /dev/null +++ 
b/data/2025/2504_05xxx/2504.05979/images/fbc284435daba3dd588193455507d1744f38c42e568b7de9d51aacf24568bbd6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25f4c181188b07ef63301acc7b0735854cf737a05bc2e720818f0d0f34255b9c +size 14980 diff --git a/data/2025/2504_05xxx/2504.05979/images/fbfad470b3b72d3478e91210af29255466f94c98c96fb529b634e931bd2f9848.jpg b/data/2025/2504_05xxx/2504.05979/images/fbfad470b3b72d3478e91210af29255466f94c98c96fb529b634e931bd2f9848.jpg new file mode 100644 index 0000000000000000000000000000000000000000..332e8348407545125b15fe3bde201e094717739f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fbfad470b3b72d3478e91210af29255466f94c98c96fb529b634e931bd2f9848.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d69d927c3ab4505543822ccfae10798c53e8552fe9d924d107c0d2c98848799 +size 10248 diff --git a/data/2025/2504_05xxx/2504.05979/images/fc8ea4a8390ad6f9e57e8f5aab23790b3bd6070cec727042772246ae96706efb.jpg b/data/2025/2504_05xxx/2504.05979/images/fc8ea4a8390ad6f9e57e8f5aab23790b3bd6070cec727042772246ae96706efb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3dfe72fff3b140efe36054dfe28d7a880076fbc --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fc8ea4a8390ad6f9e57e8f5aab23790b3bd6070cec727042772246ae96706efb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4cf3d22a6238de17049a1e93a2fc41579ac2122a50c5763f6b6c5857ff50b99 +size 13011 diff --git a/data/2025/2504_05xxx/2504.05979/images/fce6882ef70a409cc3043ec9739951fd8feb6eec25edb8d9c56a80dda6894b5f.jpg b/data/2025/2504_05xxx/2504.05979/images/fce6882ef70a409cc3043ec9739951fd8feb6eec25edb8d9c56a80dda6894b5f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30bf059710ac3609d9c00715ef7673870bfa6e9f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fce6882ef70a409cc3043ec9739951fd8feb6eec25edb8d9c56a80dda6894b5f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37c2517edf756f292b08d487f694421da6eee7df31edcdbf36180e5220609588 +size 9139 diff --git a/data/2025/2504_05xxx/2504.05979/images/fce6b55e09781aa60141751625be4a55c7d0fd1eef584be53a6585c781eade33.jpg b/data/2025/2504_05xxx/2504.05979/images/fce6b55e09781aa60141751625be4a55c7d0fd1eef584be53a6585c781eade33.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff4c2aedafb4552732f001150a6ef3253c9a1fb8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fce6b55e09781aa60141751625be4a55c7d0fd1eef584be53a6585c781eade33.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84d99700a148c57988a55fb9971a319358eb0f59b50132932d2b172b658658f1 +size 20907 diff --git a/data/2025/2504_05xxx/2504.05979/images/fd02f523b32582d773f60da26889163fcfeb85e2ad61c1cbbf337661f949aa5b.jpg b/data/2025/2504_05xxx/2504.05979/images/fd02f523b32582d773f60da26889163fcfeb85e2ad61c1cbbf337661f949aa5b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..466fed152a4f7c91633bef460cafde00354d8085 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fd02f523b32582d773f60da26889163fcfeb85e2ad61c1cbbf337661f949aa5b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7587dd1c621ebc9513b17c32817f5bb2230aed8e0e5e192f42ef4ac4008d27c5 +size 916 diff --git a/data/2025/2504_05xxx/2504.05979/images/fd3f62eae10787f13ed9ce858671690809ab040be8b604ced0e3044aef5bc8d9.jpg 
b/data/2025/2504_05xxx/2504.05979/images/fd3f62eae10787f13ed9ce858671690809ab040be8b604ced0e3044aef5bc8d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f4243d58f09d2fa319a7d7445012b5c34ad5dae --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fd3f62eae10787f13ed9ce858671690809ab040be8b604ced0e3044aef5bc8d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:845ff192d842719e37b78273970d019361f3db12cad881e8157ca6c19a3b5301 +size 13769 diff --git a/data/2025/2504_05xxx/2504.05979/images/fd41fad2fe31ea862cb86cef72fd1fa75256598dab78c33edf42129c6eb8f4a1.jpg b/data/2025/2504_05xxx/2504.05979/images/fd41fad2fe31ea862cb86cef72fd1fa75256598dab78c33edf42129c6eb8f4a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2617d8b3e4e1d1752f2b92f85d08a8dc67ef171a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fd41fad2fe31ea862cb86cef72fd1fa75256598dab78c33edf42129c6eb8f4a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04f19957b25035b98b8bb2656325a8beaa61c60ec35657e31b55d32eda246db1 +size 19622 diff --git a/data/2025/2504_05xxx/2504.05979/images/fd5210b3394235c1477fe979838a108f3de32982a0527224a4d83dc207ae62bc.jpg b/data/2025/2504_05xxx/2504.05979/images/fd5210b3394235c1477fe979838a108f3de32982a0527224a4d83dc207ae62bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28e986e5d4bd56eb1ec928cbe59be96dc946b380 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fd5210b3394235c1477fe979838a108f3de32982a0527224a4d83dc207ae62bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b70a5bb2ed5dde9ccfb7a4bd4a7c279e1fabf3d8ce871ba7c1a9580870f83995 +size 11971 diff --git a/data/2025/2504_05xxx/2504.05979/images/fd89b62d113f6d9cde9f7af5c15f0c3435fb8051b9a81f77a0195174a31e7771.jpg b/data/2025/2504_05xxx/2504.05979/images/fd89b62d113f6d9cde9f7af5c15f0c3435fb8051b9a81f77a0195174a31e7771.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89489b8eb62b9e79beb8095de16fadfe878587c8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fd89b62d113f6d9cde9f7af5c15f0c3435fb8051b9a81f77a0195174a31e7771.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cb287f31acd033fe172bae67154f3a78fee0f3a71555ad46bb8d1c4b7092596 +size 9773 diff --git a/data/2025/2504_05xxx/2504.05979/images/fe38be0bc78d431b48a8e0538e8cea074d5ce5e421511c2e081ea26e6ad8173c.jpg b/data/2025/2504_05xxx/2504.05979/images/fe38be0bc78d431b48a8e0538e8cea074d5ce5e421511c2e081ea26e6ad8173c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8942e26aaaf8ec8a36444e3a9969d0e5c727536e --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fe38be0bc78d431b48a8e0538e8cea074d5ce5e421511c2e081ea26e6ad8173c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7929c3767669f75bd24c7549e8a72c1ec1a565d9d70394d1c399f36ead4b47b +size 8814 diff --git a/data/2025/2504_05xxx/2504.05979/images/fe6974e0162720276dc4d0b553ff8e8fdee18edf7ebaa81e761e6d01fa924c91.jpg b/data/2025/2504_05xxx/2504.05979/images/fe6974e0162720276dc4d0b553ff8e8fdee18edf7ebaa81e761e6d01fa924c91.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5eaf0c7e02e9ea687126326dee2b88ac1ad8fc92 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/images/fe6974e0162720276dc4d0b553ff8e8fdee18edf7ebaa81e761e6d01fa924c91.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:edebfa69e1801ad8b638e2066ca309c272b961fb0c4ba57d4890199773467033 +size 4809 diff --git a/data/2025/2504_05xxx/2504.05979/layout.json b/data/2025/2504_05xxx/2504.05979/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0a143a5b0fceec7849aa98e54b3bc6ccd6c203a0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05979/layout.json @@ -0,0 +1,83941 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 78, + 97, + 532, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 97, + 532, + 118 + ], + "spans": [ + { + "bbox": [ + 78, + 97, + 532, + 118 + ], + "type": "text", + "content": "An Empirical Study of GPT-4o Image Generation Capabilities" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "spans": [ + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": "Sixiang Chen" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Jinbin Bai" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Zhuoran Zhao" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Tian Ye" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Qingyu Shi" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Donghao Zhou" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Wenhao Chai" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Xin Lin" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Jianzong Wu" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Chao Tang" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Shilin Xu" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Tao Zhang" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Haobo Yuan" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + 
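The image additions above are Git LFS pointer files rather than raw JPEG bytes: each records the spec version, a sha256 object id, and the blob size. For anyone auditing a checkout of this dataset, here is a minimal sketch of a pointer parser and integrity check; it assumes only the three-field pointer layout shown in this diff, and the helper names and the example filename are ours, not part of the repository tooling.

```python
# Minimal sketch (not part of this repository): parse a Git LFS pointer
# like the ones added above and verify a locally downloaded blob against
# its recorded oid/size. Assumes the three-field layout shown in the diff:
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<hex digest>
#   size <bytes>
# The example path "pointer.jpg" is hypothetical.
import hashlib
from pathlib import Path


def parse_lfs_pointer(text: str) -> dict:
    """Split each 'key value' line of the pointer into a field dict."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, _, digest = fields["oid"].partition(":")
    return {
        "version": fields["version"],
        "algo": algo,            # expected to be "sha256" per the spec above
        "digest": digest,
        "size": int(fields["size"]),
    }


def verify_blob(pointer: dict, blob_path: Path) -> bool:
    """True iff the blob's byte length and sha256 digest match the pointer."""
    data = blob_path.read_bytes()
    return (len(data) == pointer["size"]
            and hashlib.sha256(data).hexdigest() == pointer["digest"])


if __name__ == "__main__":
    ptr = parse_lfs_pointer(Path("pointer.jpg").read_text())
    print(ptr["digest"], ptr["size"])
```

In an actual checkout, `git lfs pull` remains the supported way to materialize these objects; the sketch is only useful for spot-checking that a pulled file matches its pointer.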
"content": ", Yikang Zhou" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Wei Chow" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Linfeng Li" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Xiangtai Li" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{3\\dagger}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Lei Zhu" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{1,7\\dagger}" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "text", + "content": ", Lu Qi" + }, + { + "bbox": [ + 84, + 157, + 533, + 195 + ], + "type": "inline_equation", + "content": "^{6\\dagger}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "spans": [ + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "text", + "content": "The Hong Kong University of Science and Technology (GZ) " + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "text", + "content": "National University of Singapore " + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "text", + "content": "Peking University " + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "text", + "content": "The Chinese University of Hong Kong " + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "text", + "content": "University of Washington " + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "text", + "content": "Wuhan University " + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "inline_equation", + "content": "^{7}" + }, + { + "bbox": [ + 94, + 195, + 523, + 230 + ], + "type": "text", + "content": "The Hong Kong University of Science and Technology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 281, + 258, + 329, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 258, + 329, + 270 + ], + "spans": [ + { + "bbox": [ + 281, + 258, + 329, + 270 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 113, + 283, + 499, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 283, + 499, + 437 + ], + "spans": [ + { + "bbox": [ + 113, + 283, + 499, + 437 + ], + "type": "text", + "content": "The landscape of image generation has rapidly evolved, from early GAN-based approaches to diffusion models and, most recently, to unified 
generative architectures that seek to bridge understanding and generation tasks. Recent advances, especially the GPT-4o, have demonstrated the feasibility of high-fidelity multimodal generation, their architectural design remains mysterious and unpublished. This prompts the question of whether image and text generation have already been successfully integrated into a unified framework for those methods. In this work, we conduct an empirical study of GPT-4o's image generation capabilities, benchmarking it against leading open-source and commercial models. Our evaluation covers four main categories, including text-to-image, image-to-image, image-to-3D, and image-to-X generation, with more than 20 tasks. Our analysis highlights the strengths and limitations of GPT-4o under various settings, and situates it within the broader evolution of generative modeling. Through this investigation, we identify promising directions for future unified generative models, emphasizing the role of architectural design and data scaling. For a high-definition version of the PDF, please refer to the link on GitHub: https://github.com/Ephemeral182/Empirical-Study-of-GPT-4o-Image-Gen." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 78, + 458, + 164, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 458, + 164, + 471 + ], + "spans": [ + { + "bbox": [ + 78, + 458, + 164, + 471 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 483, + 533, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 483, + 533, + 552 + ], + "spans": [ + { + "bbox": [ + 76, + 483, + 533, + 552 + ], + "type": "text", + "content": "Over the past decade, image generation has undergone a remarkable evolution—from the early successes of GANs [35] to the dominance of diffusion models [89, 82, 26], which have significantly advanced image fidelity and diversity [37, 7]. In parallel, Large Language Models (LLMs) have achieved exceptional performance across diverse natural language tasks by scaling autoregressive next-token prediction, demonstrating the power of unified modeling principles. These advances naturally raise a compelling question: can such principles be extended to image generation?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 76, + 554, + 532, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 554, + 532, + 643 + ], + "spans": [ + { + "bbox": [ + 76, + 554, + 532, + 643 + ], + "type": "text", + "content": "However, fundamental differences between autoregressive and diffusion-based paradigms present non-trivial challenges. Autoregressive models excel in sequential text generation, while diffusion models have become the de facto standard for high-quality image synthesis. Bridging these modalities within a unified framework remains an open challenge. Several works [96, 101, 100, 34, 24, 13] attempt to bridge this gap via multimodal connectors or instruction tuning, with LLMs serving as planning modules that produce intermediate representations for image generation. While effective to some extent, these paradigms often exhibit limited interaction between text and image modalities, and struggle with content consistency—particularly in image-to-image generation and complex instruction-based synthesis." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 77, + 647, + 533, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 647, + 533, + 693 + ], + "spans": [ + { + "bbox": [ + 77, + 647, + 533, + 693 + ], + "type": "text", + "content": "To address these limitations, recent research explores unified generation models that integrate understanding and generation within a single architecture, following three main technical paradigms. The first line of work represents both language and vision as discrete token sequences [67, 98, 110, 104, 19, 65, 109], leveraging VQGAN [28] or similar compressors to tokenize images for compatibility with autoregressive models. A second direction integrates" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.05979v2 [cs.CV] 10 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 91, + 700, + 432, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 700, + 432, + 712 + ], + "spans": [ + { + "bbox": [ + 91, + 700, + 432, + 712 + ], + "type": "text", + "content": "*Equal contributions. ☑: schen691@connect.hkust-gz.edu.cn † Corresponding authors." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 78, + 731, + 178, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 731, + 178, + 742 + ], + "spans": [ + { + "bbox": [ + 78, + 731, + 178, + 742 + ], + "type": "text", + "content": "Preprint. Work in progress." + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 77, + 72, + 533, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 72, + 533, + 161 + ], + "spans": [ + { + "bbox": [ + 77, + 72, + 533, + 161 + ], + "type": "text", + "content": "large language models directly into the diffusion process [128, 126, 112, 72], employing them as denoising backbones for image generation and as unified sequence models for text. While promising, these approaches typically rely on intermediate compression modules such as VAEs or VQVAEs, which may limit visual fidelity or increase architectural complexity. A third and increasingly prominent paradigm investigates discrete diffusion frameworks that natively support both image and text generation within a unified modeling space [71, 73, 93]. Building on this insight, recent works [58, 97] propose fully end-to-end diffusion architectures based on shared Transformer backbones, demonstrating competitive performance and seamless modality integration comparable to similarly sized LLMs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 165, + 533, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 165, + 533, + 189 + ], + "spans": [ + { + "bbox": [ + 77, + 165, + 533, + 189 + ], + "type": "text", + "content": "Despite these promising directions, such systems still lag behind the sophistication and generalization capabilities of proprietary models like Flux [51] and Midjourney [75], which may lack reasoning capabilities." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 192, + 533, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 192, + 533, + 258 + ], + "spans": [ + { + "bbox": [ + 77, + 192, + 533, + 258 + ], + "type": "text", + "content": "The recent release of GPT-4o [78] marks a significant milestone in multimodal generative modeling. As a native multimodal architecture, GPT-4o demonstrates strong capabilities in generating high-fidelity, photorealistic images while seamlessly unifying vision and language generation—reportedly in an autoregressive fashion. However, its closed-source nature—particularly the lack of disclosure about its architecture, training regimen, and inference mechanisms—poses substantial challenges for scientific scrutiny. This motivates a careful empirical assessment of its capabilities relative to open-source state-of-the-art models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 263, + 533, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 263, + 533, + 319 + ], + "spans": [ + { + "bbox": [ + 77, + 263, + 533, + 319 + ], + "type": "text", + "content": "Although the visual performance of GPT-4o and Gemini is widely recognized, much of their success likely stems from unprecedented scale in training data, model parameters, and compute resources. Prior studies, including diffusion models and connected-based models, suggest that scaling is a key enabler of generative quality—potentially more so than architectural novelty alone. These trends point to a promising trajectory for unified generative models: with sufficient scale, they may rival or even surpass today's best proprietary systems." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 323, + 533, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 323, + 533, + 369 + ], + "spans": [ + { + "bbox": [ + 77, + 323, + 533, + 369 + ], + "type": "text", + "content": "In this study, we conduct a comprehensive evaluation of GPT-4o's image generation performance, benchmarking its outputs against leading systems including Gemini 2.0 Flash Experimental [99] and other state-of-the-art models. Building upon our comparative evaluation across text-to-image, image-to-image, image-to-3D, and image-to-X generation tasks, GPT-4o demonstrates several distinctive strengths:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 377, + 532, + 590 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 105, + 377, + 532, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 377, + 532, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 377, + 532, + 422 + ], + "type": "text", + "content": "- Exceptional Text Rendering Capability. GPT-4o demonstrates exceptional capability in rendering textual elements within images, maintaining correct spelling, alignment, and formatting even in document-style generation tasks. This level of text fluency is rarely seen in prior models and is crucial for practical applications such as chart generation, document layout synthesis, and instruction-rich visual storytelling." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 426, + 532, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 532, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 532, + 470 + ], + "type": "text", + "content": "- Compositional Generalization and Prompt Following. 
GPT-4o displays impressive compositional abilities, accurately assembling complex scene elements, styles, or attributes described in prompts. This high prompt following enables it to handle fine-grained multi-attribute conditions in generation tasks with minimal loss of semantic detail." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 475, + 532, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 532, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 532, + 520 + ], + "type": "text", + "content": "- Spatial Reasoning and Multi-View Consistency. In generation tasks involving spatial manipulation, such as 3D view synthesis, camera control, and depth-conditioned rendering, GPT-4o maintains geometric consistency and viewpoint realism. This indicates an inherent capacity for spatial reasoning and structural awareness, even without explicit 3D modeling modules." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 524, + 532, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 524, + 532, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 524, + 532, + 590 + ], + "type": "text", + "content": "- Comprehensive Image Transformation Capability. GPT-4o shows strong generalization across a wide spectrum of image-to-image tasks, ranging from low-level image restoration to high-level perceptual understanding. Without task-specific tuning, it almost handles diverse transformations such as denoising, deblurring, relighting, segmentation, and depth estimation. This suggests the model has learned robust visual priors and spatial semantics, enabling it to perform correction and abstract structural prediction under a unified framework." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 77, + 599, + 531, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 599, + 531, + 623 + ], + "spans": [ + { + "bbox": [ + 77, + 599, + 531, + 623 + ], + "type": "text", + "content": "However, limitations remain in inconsistent generation, hallucination, and data bias in underrepresented cultural elements and non-Latin scripts, highlighting current trade-offs in model design and training data coverage." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 77, + 626, + 533, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 626, + 533, + 682 + ], + "spans": [ + { + "bbox": [ + 77, + 626, + 533, + 682 + ], + "type": "text", + "content": "While we do not analyze the internal architecture or implementation details of GPT-4o in this paper*, we believe it plays an important role toward unified multimodal generation. We also emphasize that model architecture is only one part of this progress—training data, model scale, and optimization strategies are equally important. We hope future work will provide more empirical evidence to better understand such proprietary systems and their position within this evolving research landscape." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 77, + 691, + 533, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 691, + 533, + 723 + ], + "spans": [ + { + "bbox": [ + 77, + 691, + 533, + 723 + ], + "type": "text", + "content": "*There is currently no definitive evidence regarding the specific implementation details or architectural design of GPT-4o's image generation capabilities. 
To ensure the credibility and accuracy of our analysis, we refrain from making speculative claims in the current version." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 71, + 155, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 71, + 155, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 71, + 155, + 83 + ], + "type": "text", + "content": "2 Evaluation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 95, + 533, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 95, + 533, + 129 + ], + "spans": [ + { + "bbox": [ + 77, + 95, + 533, + 129 + ], + "type": "text", + "content": "As GPT-4o's image generation capability has only recently been released and no API is available, we conduct only qualitative comparisons between GPT-4o, Gemini 2.0 Flash [99], and other state-of-the-art models in their respective domains." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 134, + 533, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 134, + 533, + 179 + ], + "spans": [ + { + "bbox": [ + 77, + 134, + 533, + 179 + ], + "type": "text", + "content": "To systematically compare these models' performance across diverse image generation tasks, including text-to-image generation, image-to-image generation, text/image to 3D generation, and various image-to-X generation tasks, we conduct a detailed case study of each model's performance. This qualitative analysis provides insight into GPT-4o's strengths and limitations in various tasks, as shown in Table 1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 185, + 532, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 185, + 532, + 210 + ], + "spans": [ + { + "bbox": [ + 77, + 185, + 532, + 210 + ], + "type": "text", + "content": "Low Visual Quality : The image synthesis model fails to generate fine-grained object details or produces blurry outputs. Typical cases include distorted human bodies or unrealistic hand shapes." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 216, + 531, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 216, + 531, + 239 + ], + "spans": [ + { + "bbox": [ + 77, + 216, + 531, + 239 + ], + "type": "text", + "content": "Inconsistent Generation : The image synthesis model produces outputs or image details that are inconsistent with the input image." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 247, + 531, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 247, + 531, + 271 + ], + "spans": [ + { + "bbox": [ + 77, + 247, + 531, + 271 + ], + "type": "text", + "content": "Lack of Knowledge : The image synthesis model lacks domain-specific knowledge, such as particular artistic styles, and thus generates visually plausible but incorrect results."
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 77, + 277, + 531, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 277, + 531, + 302 + ], + "spans": [ + { + "bbox": [ + 77, + 277, + 531, + 302 + ], + "type": "text", + "content": "Failure to Follow Instructions : The image synthesis model misinterprets the input prompt and produces results that deviate from the instructions. For example, it may fail to capture specified numbers, colors, or object arrangements." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 79, + 110, + 533, + 700 + ], + "blocks": [ + { + "bbox": [ + 127, + 91, + 481, + 104 + ], + "lines": [ + { + "bbox": [ + 127, + 91, + 481, + 104 + ], + "spans": [ + { + "bbox": [ + 127, + 91, + 481, + 104 + ], + "type": "text", + "content": "Table 1: GPT-4o vs. Baselines: Qualitative error analysis across image generation tasks." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 110, + 533, + 700 + ], + "lines": [ + { + "bbox": [ + 79, + 110, + 533, + 700 + ], + "spans": [ + { + "bbox": [ + 79, + 110, + 533, + 700 + ], + "type": "table", + "html": "
<table><tr><th>Case Figure</th><th>Meta-task</th><th>Sub-task</th><th>GPT-4o</th><th>Gemini 2.0 Flash</th><th>Domain-SOTA</th></tr>
<tr><td>Figure 1</td><td></td><td></td><td>Success</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 2</td><td></td><td>Complex Text Following</td><td>Success</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 3</td><td></td><td></td><td>Success</td><td>Success</td><td>Success</td></tr>
<tr><td>Figure 4</td><td></td><td></td><td>Success</td><td>Success</td><td>Success</td></tr>
<tr><td>Figure 5</td><td></td><td></td><td>Success</td><td>Success</td><td>Success</td></tr>
<tr><td>Figure 6</td><td>Text-to-Image</td><td>Text Rendering</td><td>Success</td><td>Low Visual Quality</td><td>Low Visual Quality</td></tr>
<tr><td>Figure 7</td><td></td><td></td><td>Success</td><td>Low Visual Quality</td><td>Low Visual Quality</td></tr>
<tr><td>Figure 8</td><td></td><td></td><td>Success</td><td>Low Visual Quality</td><td>Low Visual Quality</td></tr>
<tr><td>Figure 9</td><td></td><td>Document Generation</td><td>Success</td><td>Low Visual Quality</td><td>Low Visual Quality</td></tr>
<tr><td>Figure 10</td><td></td><td></td><td>Success</td><td>Low Visual Quality</td><td>Low Visual Quality</td></tr>
<tr><td>Figure 11</td><td></td><td>Panorama</td><td>Lack of Knowledge</td><td>Success</td><td>Success</td></tr>
<tr><td>Figure 12</td><td></td><td>Style Transfer</td><td>Success</td><td>Lack of Knowledge</td><td>Lack of Knowledge</td></tr>
<tr><td>Figure 13</td><td></td><td></td><td>Success</td><td>Lack of Knowledge</td><td>Lack of Knowledge</td></tr>
<tr><td>Figure 14</td><td></td><td></td><td>Low Visual Quality</td><td>Success</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 15</td><td></td><td>Image Editing</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 16</td><td></td><td></td><td>Success</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 17</td><td></td><td></td><td>Success</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 18</td><td></td><td></td><td>Success</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 19</td><td></td><td></td><td>Success</td><td>Inconsistent Generation</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 20</td><td></td><td>Single-Concept Customization</td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 21</td><td></td><td>Multi-Concept Customization</td><td>Inconsistent Generation</td><td>Inconsistent Generation</td><td>Success</td></tr>
<tr><td>Figure 22</td><td></td><td>Story Image Generation</td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 23</td><td></td><td></td><td>Success</td><td>Inconsistent Generation</td><td>Success</td></tr>
<tr><td>Figure 24</td><td></td><td>Low-Level Vision-Denoising</td><td>Low Visual Quality</td><td>Low Visual Quality</td><td>Success</td></tr>
<tr><td>Figure 25</td><td></td><td>Low-Level Vision-Deraining</td><td>Success</td><td>Inconsistent Generation</td><td>Success</td></tr>
<tr><td>Figure 26</td><td></td><td>Low-Level Vision-Dehazing</td><td>Success</td><td>Low Visual Quality</td><td>Success</td></tr>
<tr><td>Figure 27</td><td></td><td>Low-Level Vision-Low Light Enhancement</td><td>Low Visual Quality</td><td>Low Visual Quality</td><td>Success</td></tr>
<tr><td>Figure 28</td><td></td><td>Low-Level Vision-Deblurring</td><td>Success</td><td>Low Visual Quality</td><td>Success</td></tr>
<tr><td>Figure 29</td><td></td><td>Low-Level Vision-Super Resolution</td><td>Success</td><td>Low Visual Quality</td><td>Success</td></tr>
<tr><td>Figure 30</td><td></td><td>Low-Level Vision-Inpainting</td><td>Inconsistent Generation</td><td>Inconsistent Generation</td><td>Success</td></tr>
<tr><td>Figure 31</td><td></td><td>Low-Level Vision-Outpainting</td><td>Inconsistent Generation</td><td>Success</td><td>Success</td></tr>
<tr><td>Figure 32</td><td></td><td>Low-Level Vision-Colorization</td><td>Success</td><td>Success</td><td>Success</td></tr>
<tr><td>Figure 33</td><td></td><td>Low-Level Vision-Shadow Removal</td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 34</td><td></td><td>Low-Level Vision-Reflection Removal</td><td>Inconsistent Generation</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 35</td><td></td><td>Low-Level Vision-Relighting</td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 36</td><td></td><td>Spatial Control-Canny</td><td>Inconsistent Generation</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 37</td><td></td><td>Spatial Control-Depth</td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 38</td><td></td><td>Spatial Control-Sketch</td><td>Inconsistent Generation</td><td>Inconsistent Generation</td><td>Success</td></tr>
<tr><td>Figure 39</td><td></td><td>Spatial Control-Pose</td><td>Success</td><td>Inconsistent Generation</td><td>Success</td></tr>
<tr><td>Figure 40</td><td></td><td>Spatial Control-Mask</td><td>Inconsistent Generation</td><td>Failure to Follow Instructions</td><td>Inconsistent Generation</td></tr>
<tr><td>Figure 41</td><td></td><td>Camera Control</td><td>Inconsistent Generation</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 42</td><td></td><td></td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 43</td><td></td><td>In-Context Visual Prompting</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td><td>N/A</td></tr>
<tr><td>Figure 44</td><td></td><td>Image to 3D Modeling</td><td>Success</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 45</td><td></td><td>UV Map to 3D Rendering</td><td>Success</td><td>Inconsistent Generation</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 46</td><td></td><td>Novel View Synthesis</td><td>Success</td><td>Success</td><td>Failure to Follow Instructions</td></tr>
<tr><td>Figure 47</td><td></td><td>Image Segmentation</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 48</td><td></td><td></td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 49</td><td></td><td></td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 50</td><td></td><td>Edge Detection</td><td>Success</td><td>Success</td><td>Success</td></tr>
<tr><td>Figure 51</td><td></td><td></td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 52</td><td></td><td></td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 53</td><td></td><td>Salient Object</td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 54</td><td></td><td></td><td>Success</td><td>Success</td><td>Success</td></tr>
<tr><td>Figure 55</td><td></td><td></td><td>Success</td><td>Success</td><td>Success</td></tr>
<tr><td>Figure 56</td><td></td><td>Depth Estimation</td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 57</td><td></td><td>Normal Estimation</td><td>Success</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 58</td><td></td><td>Layout Detection</td><td>Inconsistent Generation</td><td>Inconsistent Generation</td><td>Success</td></tr>
<tr><td>Figure 59</td><td></td><td>Text Detection</td><td>Failure to Follow Instructions</td><td>Failure to Follow Instructions</td><td>Success</td></tr>
<tr><td>Figure 60</td><td></td><td></td><td>Inconsistent Generation</td><td>Inconsistent Generation</td><td>Success</td></tr>
<tr><td>Figure 61</td><td></td><td></td><td>Inconsistent Generation</td><td>Inconsistent Generation</td><td>Success</td></tr>
<tr><td>Figure 62</td><td></td><td></td><td>Inconsistent Generation</td><td>Inconsistent Generation</td><td>Success</td></tr>
<tr><td>Figure 63</td><td></td><td></td><td>Inconsistent Generation</td><td>Inconsistent Generation</td><td>Success</td></tr>
</table>
", + "image_path": "69fc7b667b5296c731fc241b01cedfd07a9c8e3fb9b10cdf2db89fc2a34aef2f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 190, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 190, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 190, + 83 + ], + "type": "text", + "content": "2.1 Text-to-Image Tasks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 78, + 92, + 262, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 92, + 262, + 105 + ], + "spans": [ + { + "bbox": [ + 78, + 92, + 262, + 105 + ], + "type": "text", + "content": "2.1.1 Complex Text Following Capability" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 111, + 533, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 111, + 533, + 223 + ], + "spans": [ + { + "bbox": [ + 76, + 111, + 533, + 223 + ], + "type": "text", + "content": "Recent progress in text-to-image generation has shown impressive abilities in generating diverse and realistic images based on text prompts. However, composing multiple objects with various attributes and relationships accurately into one scene remains a significant challenge for current text-to-image generative models [92, 85, 8, 81, 6]. In this section, we assess models' ability for compositional text-to-image generation from four perspectives following [41], which include attribute binding, numeracy, object relationship, and complex compositions. Attribute binding evaluates whether the model correctly assigns attributes, such as color, shape, and texture to the appropriate objects. Numeracy evaluates whether the number of generated objects matches the quantities specified in the prompt. Object relationships refer to both spatial (2D/3D) and non-spatial interactions among objects. Complex compositions evaluate the model's ability to handle multiple types of constraints simultaneously, especially given long or detailed prompts." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 225, + 533, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 225, + 533, + 315 + ], + "spans": [ + { + "bbox": [ + 76, + 225, + 533, + 315 + ], + "type": "text", + "content": "As shown in Figure 1 row 1, GPT-4o outperforms both Gemini 2.0 Flash and Midjourney in numeracy tasks. While GPT-4o accurately represents a single plate, Gemini 2.0 and Midjourney represent two plates instead. In terms of understanding object relationships, GPT-4o is the only model that correctly infers the action \"walk towards\" from the ragdoll to the labrador. However, GPT-4o struggles with more complex terms like \"pentagonal pyramid\", failing to interpret it correctly (see Figure 1 row 4). This suggests that GPT-4o may have difficulty accurately interpreting objects with unusual geometries. When it comes to abstract prompts, GPT-4o also appears to lack imagination (see Figure 2 row 2), whereas Midjourney v6.1 demonstrates better creativity in this case, outperforming both GPT-4o and Gemini 2.0 Flash." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 318, + 533, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 318, + 533, + 397 + ], + "spans": [ + { + "bbox": [ + 76, + 318, + 533, + 397 + ], + "type": "text", + "content": "For complex text-to-image generation, we evaluate GPT-4o's performance against Gemini 2.0 Flash [99] and FLUX.1-Pro [51], using the text prompts collected from [124, 106, 115]. As shown in Figure 3, both GPT-4o and FLUX excel at generating realistic and harmonious scenes that align with the text prompts. However, we observe that GPT-4o shows limitations in generating culturally related elements. For example, the generated crown for the Chinese general is Western-style rather than Chinese-style (see Figure 4 row 2). Additionally, in large scene generation, GPT-4o struggles to maintain boundary continuity, whereas FLUX produces a more natural composition (see Figure 4 row 3)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 400, + 533, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 400, + 533, + 436 + ], + "spans": [ + { + "bbox": [ + 76, + 400, + 533, + 436 + ], + "type": "text", + "content": "Overall, we conclude that GPT-4o excels at text-to-image generation in terms of attribute binding, generative numeracy, object relationship, and complex compositions. However, it exhibits limitations in generating uncommon objects and culturally specific elements, and in maintaining continuity when composing large scenes." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 117, + 79, + 240, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 79, + 240, + 92 + ], + "spans": [ + { + "bbox": [ + 117, + 79, + 240, + 92 + ], + "type": "text", + "content": "Text-to-Image Generation" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 154, + 100, + 165, + 114 + ], + "blocks": [ + { + "bbox": [ + 154, + 100, + 165, + 114 + ], + "lines": [ + { + "bbox": [ + 154, + 100, + 165, + 114 + ], + "spans": [ + { + "bbox": [ + 154, + 100, + 165, + 114 + ], + "type": "image", + "image_path": "5e416a9762e2def779608eb1bf6eee5a9cee49745a628b45ccc17073ed461705.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 166, + 102, + 473, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 102, + 473, + 115 + ], + "spans": [ + { + "bbox": [ + 166, + 102, + 473, + 115 + ], + "type": "text", + "content": "Evaluation: Visual content precisely following the text instruction."
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 105, + 116, + 222, + 232 + ], + "blocks": [ + { + "bbox": [ + 105, + 116, + 222, + 232 + ], + "lines": [ + { + "bbox": [ + 105, + 116, + 222, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 222, + 232 + ], + "type": "image", + "image_path": "b56cffa92dcc8965f109102c1a0975dedfc665539ea8cf6c71d5a2879dfe8dcc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 241, + 116, + 359, + 234 + ], + "blocks": [ + { + "bbox": [ + 241, + 116, + 359, + 234 + ], + "lines": [ + { + "bbox": [ + 241, + 116, + 359, + 234 + ], + "spans": [ + { + "bbox": [ + 241, + 116, + 359, + 234 + ], + "type": "image", + "image_path": "e6d59fa9484b3877034764c9fa8752e51c78a7b4eba2a3dd224cdb9236f23368.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 376, + 116, + 504, + 234 + ], + "blocks": [ + { + "bbox": [ + 376, + 116, + 504, + 234 + ], + "lines": [ + { + "bbox": [ + 376, + 116, + 504, + 234 + ], + "spans": [ + { + "bbox": [ + 376, + 116, + 504, + 234 + ], + "type": "image", + "image_path": "114bf170640575674f20a5f89dd3e1c35830102990e7d45e9fe0e42bf870b7d0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 105, + 248, + 224, + 365 + ], + "blocks": [ + { + "bbox": [ + 105, + 248, + 224, + 365 + ], + "lines": [ + { + "bbox": [ + 105, + 248, + 224, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 224, + 365 + ], + "type": "image", + "image_path": "e21293e1dde8f2f8a8736c0a4d673afa08874092081ea4b1ec07788bbe177b05.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 241, + 247, + 361, + 368 + ], + "blocks": [ + { + "bbox": [ + 107, + 236, + 373, + 247 + ], + "lines": [ + { + "bbox": [ + 107, + 236, + 373, + 247 + ], + "spans": [ + { + "bbox": [ + 107, + 236, + 373, + 247 + ], + "type": "text", + "content": "Input Text: \"A yellow bowl, a blue mug and a pink plate on the table.\"" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 241, + 247, + 361, + 368 + ], + "lines": [ + { + "bbox": [ + 241, + 247, + 361, + 368 + ], + "spans": [ + { + "bbox": [ + 241, + 247, + 361, + 368 + ], + "type": "image", + "image_path": "cde20841ace4621a197135b25fda935e422854fc542a786ca15f9aea2eb46486.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 109, + 368, + 299, + 378 + ], + "lines": [ + { + "bbox": [ + 109, + 368, + 299, + 378 + ], + "spans": [ + { + "bbox": [ + 109, + 368, + 299, + 378 + ], + "type": "text", + "content": "Input Text: \"A ragdoll walks towards a labrador.\"" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 376, + 246, + 503, + 368 + ], + "blocks": [ + { + "bbox": [ + 376, + 246, + 503, + 368 + ], + "lines": [ + { + "bbox": [ + 376, + 246, + 503, + 368 + ], + "spans": [ + { + "bbox": [ + 376, + 246, + 503, + 368 + ], + "type": "image", + "image_path": "d0a18282d6404ef0ab645148e73d17fe445982eb8c40cb3c0d5a9081efd6d52c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 105, + 380, + 227, + 497 + ], + "blocks": [ + { + "bbox": [ + 105, + 380, + 227, + 497 
+ ], + "lines": [ + { + "bbox": [ + 105, + 380, + 227, + 497 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 227, + 497 + ], + "type": "image", + "image_path": "3bffa6f0cb5545336d1dece3985a3fcaff2ad60da5c231db47b90005c63f4ceb.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 106, + 498, + 496, + 519 + ], + "lines": [ + { + "bbox": [ + 106, + 498, + 496, + 519 + ], + "spans": [ + { + "bbox": [ + 106, + 498, + 496, + 519 + ], + "type": "text", + "content": "Input Text: \"Three differently colored apples (yellow, green, red from left to right) with a Coca-Cola bottle placed behind the middle apple.\"" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 242, + 380, + 361, + 497 + ], + "blocks": [ + { + "bbox": [ + 242, + 380, + 361, + 497 + ], + "lines": [ + { + "bbox": [ + 242, + 380, + 361, + 497 + ], + "spans": [ + { + "bbox": [ + 242, + 380, + 361, + 497 + ], + "type": "image", + "image_path": "f333d581ad570f753e99a7e73c99d827c89d746ae6f1f549c53cf30cf7e7ea33.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 376, + 379, + 503, + 497 + ], + "blocks": [ + { + "bbox": [ + 376, + 379, + 503, + 497 + ], + "lines": [ + { + "bbox": [ + 376, + 379, + 503, + 497 + ], + "spans": [ + { + "bbox": [ + 376, + 379, + 503, + 497 + ], + "type": "image", + "image_path": "107a161e8c64a21e6ae887d967ee6aa6bf1c940aafacc290e429d8deb9516754.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 105, + 521, + 226, + 639 + ], + "blocks": [ + { + "bbox": [ + 105, + 521, + 226, + 639 + ], + "lines": [ + { + "bbox": [ + 105, + 521, + 226, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 226, + 639 + ], + "type": "image", + "image_path": "00b435bd1d8026aa4f0d08be8a9579479f625012cae88f49f81a5d958cf5c199.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 106, + 640, + 501, + 652 + ], + "lines": [ + { + "bbox": [ + 106, + 640, + 501, + 652 + ], + "spans": [ + { + "bbox": [ + 106, + 640, + 501, + 652 + ], + "type": "text", + "content": "Input Text: \"The oval sphere was nestled between the rectangular prism and the pentagonal pyramid.\"" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 687, + 533, + 733 + ], + "lines": [ + { + "bbox": [ + 77, + 687, + 533, + 733 + ], + "spans": [ + { + "bbox": [ + 77, + 687, + 533, + 733 + ], + "type": "text", + "content": "Figure 1: Task: Compositional text-to-image generation. Evaluate the image-text alignment on attribute binding, numeracy, and object relationship. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o outperforms Gemini 2.0 Flash and Midjourney v6.1 across all aspects. However, GPT-4o struggles with uncommon objects with a special geometry." 
+ } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 241, + 521, + 361, + 639 + ], + "blocks": [ + { + "bbox": [ + 241, + 521, + 361, + 639 + ], + "lines": [ + { + "bbox": [ + 241, + 521, + 361, + 639 + ], + "spans": [ + { + "bbox": [ + 241, + 521, + 361, + 639 + ], + "type": "image", + "image_path": "b1e74b98b6b45c314db43ad44efec736de0526ea53933c41d6f494dfc222ec5c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 376, + 521, + 503, + 639 + ], + "blocks": [ + { + "bbox": [ + 376, + 521, + 503, + 639 + ], + "lines": [ + { + "bbox": [ + 376, + 521, + 503, + 639 + ], + "spans": [ + { + "bbox": [ + 376, + 521, + 503, + 639 + ], + "type": "image", + "image_path": "ec488303c09dbf9dfb728fd1952368c1c3fa85f708e3b290ecd1a36b50ef4803.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 144, + 657, + 176, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 657, + 176, + 667 + ], + "spans": [ + { + "bbox": [ + 144, + 657, + 176, + 667 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 267, + 658, + 337, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 658, + 337, + 669 + ], + "spans": [ + { + "bbox": [ + 267, + 658, + 337, + 669 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 407, + 657, + 474, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 407, + 657, + 474, + 669 + ], + "spans": [ + { + "bbox": [ + 407, + 657, + 474, + 669 + ], + "type": "text", + "content": "Midjourney v6.1" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 155, + 101, + 165, + 114 + ], + "blocks": [ + { + "bbox": [ + 155, + 101, + 165, + 114 + ], + "lines": [ + { + "bbox": [ + 155, + 101, + 165, + 114 + ], + "spans": [ + { + "bbox": [ + 155, + 101, + 165, + 114 + ], + "type": "image", + "image_path": "41cf37136782a8224523753684cab11c0ddb36c12dfd39ee89ac851285853af2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 167, + 103, + 473, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 103, + 473, + 115 + ], + "spans": [ + { + "bbox": [ + 167, + 103, + 473, + 115 + ], + "type": "text", + "content": "Evaluation: Visual content precisely following the text instruction."
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 108, + 116, + 228, + 232 + ], + "blocks": [ + { + "bbox": [ + 108, + 116, + 228, + 232 + ], + "lines": [ + { + "bbox": [ + 108, + 116, + 228, + 232 + ], + "spans": [ + { + "bbox": [ + 108, + 116, + 228, + 232 + ], + "type": "image", + "image_path": "742bdab0de5d04587ee79a5c9d93f90cfb4494e5a48dcf258a1406c6265bd627.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 107, + 234, + 509, + 252 + ], + "lines": [ + { + "bbox": [ + 107, + 234, + 509, + 252 + ], + "spans": [ + { + "bbox": [ + 107, + 234, + 509, + 252 + ], + "type": "text", + "content": "Input Text: \"The round, juicy watermelon sat in the cool, refreshing bowl of ice, waiting to be sliced open and devoured.\"" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 241, + 116, + 374, + 232 + ], + "blocks": [ + { + "bbox": [ + 241, + 116, + 374, + 232 + ], + "lines": [ + { + "bbox": [ + 241, + 116, + 374, + 232 + ], + "spans": [ + { + "bbox": [ + 241, + 116, + 374, + 232 + ], + "type": "image", + "image_path": "c73e3aa340f33cab4cf19354249e37fe1bd7f76aef6ec7fbcc154198f1dad05b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 388, + 116, + 504, + 232 + ], + "blocks": [ + { + "bbox": [ + 388, + 116, + 504, + 232 + ], + "lines": [ + { + "bbox": [ + 388, + 116, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 388, + 116, + 504, + 232 + ], + "type": "image", + "image_path": "382030f3e3f0298bca0a14e5cda55ee5859de8f05e70a6182873cc5b53987e77.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 108, + 253, + 228, + 372 + ], + "blocks": [ + { + "bbox": [ + 108, + 253, + 228, + 372 + ], + "lines": [ + { + "bbox": [ + 108, + 253, + 228, + 372 + ], + "spans": [ + { + "bbox": [ + 108, + 253, + 228, + 372 + ], + "type": "image", + "image_path": "9a87f98dd86139de0d38246d54b1d070591e2fdb526d3285409a473583f28ec6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 243, + 253, + 373, + 371 + ], + "blocks": [ + { + "bbox": [ + 243, + 253, + 373, + 371 + ], + "lines": [ + { + "bbox": [ + 243, + 253, + 373, + 371 + ], + "spans": [ + { + "bbox": [ + 243, + 253, + 373, + 371 + ], + "type": "image", + "image_path": "fce6b55e09781aa60141751625be4a55c7d0fd1eef584be53a6585c781eade33.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 387, + 253, + 506, + 372 + ], + "blocks": [ + { + "bbox": [ + 387, + 253, + 506, + 372 + ], + "lines": [ + { + "bbox": [ + 387, + 253, + 506, + 372 + ], + "spans": [ + { + "bbox": [ + 387, + 253, + 506, + 372 + ], + "type": "image", + "image_path": "0f7ad2208e211f067756484bb7a6847038cf13549a2f34c8a13b12eeda49aad7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 108, + 393, + 228, + 510 + ], + "blocks": [ + { + "bbox": [ + 109, + 373, + 507, + 393 + ], + "lines": [ + { + "bbox": [ + 109, + 373, + 507, + 393 + ], + "spans": [ + { + "bbox": [ + 109, + 373, + 507, + 393 + ], + "type": "text", + "content": "Input Text: \"The bold, expressive strokes of the artist's brush brought the blank canvas to life, forming a vibrant and 
dynamic masterpiece.\"" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 393, + 228, + 510 + ], + "lines": [ + { + "bbox": [ + 108, + 393, + 228, + 510 + ], + "spans": [ + { + "bbox": [ + 108, + 393, + 228, + 510 + ], + "type": "image", + "image_path": "d87f5d38548a498be4acc2e6175a859f019a900cdcbd8394bd2700b4f145ed8f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 243, + 393, + 373, + 510 + ], + "blocks": [ + { + "bbox": [ + 243, + 393, + 373, + 510 + ], + "lines": [ + { + "bbox": [ + 243, + 393, + 373, + 510 + ], + "spans": [ + { + "bbox": [ + 243, + 393, + 373, + 510 + ], + "type": "image", + "image_path": "c87b7b6792c5fda9d5340959a376273a7f5d0b80db6770aea6ec9e15c6ec596d.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 387, + 393, + 507, + 511 + ], + "blocks": [ + { + "bbox": [ + 387, + 393, + 507, + 511 + ], + "lines": [ + { + "bbox": [ + 387, + 393, + 507, + 511 + ], + "spans": [ + { + "bbox": [ + 387, + 393, + 507, + 511 + ], + "type": "image", + "image_path": "4d7cf29e3244126ed31062342f32c73674817be1d0079f2da561d4e75f3368e2.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 108, + 525, + 228, + 643 + ], + "blocks": [ + { + "bbox": [ + 111, + 513, + 429, + 523 + ], + "lines": [ + { + "bbox": [ + 111, + 513, + 429, + 523 + ], + "spans": [ + { + "bbox": [ + 111, + 513, + 429, + 523 + ], + "type": "text", + "content": "Input Text: \"The heavy raindrops fell on the smooth glass and the textured roof.\"" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 525, + 228, + 643 + ], + "lines": [ + { + "bbox": [ + 108, + 525, + 228, + 643 + ], + "spans": [ + { + "bbox": [ + 108, + 525, + 228, + 643 + ], + "type": "image", + "image_path": "c36a7bcd11820fd2f7d3c63fbf0e563dbdebdbcf2828507130fec8b86c2c1cc1.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 243, + 525, + 374, + 644 + ], + "blocks": [ + { + "bbox": [ + 243, + 525, + 374, + 644 + ], + "lines": [ + { + "bbox": [ + 243, + 525, + 374, + 644 + ], + "spans": [ + { + "bbox": [ + 243, + 525, + 374, + 644 + ], + "type": "image", + "image_path": "0e3a0dc448dd3f289f1837b24241f0ae490d1d79799115f52e41475efc038437.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 108, + 645, + 494, + 666 + ], + "lines": [ + { + "bbox": [ + 108, + 645, + 494, + 666 + ], + "spans": [ + { + "bbox": [ + 108, + 645, + 494, + 666 + ], + "type": "text", + "content": "Input Text: \"The gentle, soothing melody of the piano filled the concert hall, as the pianist's fingers danced over the keys.\"" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 691, + 532, + 746 + ], + "lines": [ + { + "bbox": [ + 77, + 691, + 532, + 746 + ], + "spans": [ + { + "bbox": [ + 77, + 691, + 532, + 746 + ], + "type": "text", + "content": "Figure 2: Task: Compositional text-to-image generation. Evaluate the image-text alignment on attribute binding and complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. 
Observation: GPT-4o outperforms the other two models in generating objects that accurately align with the text prompts. For more abstract and creative tasks, however, Midjourney v6.1 performs best." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 387, + 525, + 507, + 644 + ], + "blocks": [ + { + "bbox": [ + 387, + 525, + 507, + 644 + ], + "lines": [ + { + "bbox": [ + 387, + 525, + 507, + 644 + ], + "spans": [ + { + "bbox": [ + 387, + 525, + 507, + 644 + ], + "type": "image", + "image_path": "adcb856bb8a351cb62cc8d6fd9dcfe0f715a542653534f39e99b1973a7067ec8.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 143, + 667, + 175, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 667, + 175, + 677 + ], + "spans": [ + { + "bbox": [ + 143, + 667, + 175, + 677 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 266, + 667, + 335, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 667, + 335, + 677 + ], + "spans": [ + { + "bbox": [ + 266, + 667, + 335, + 677 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 406, + 666, + 472, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 666, + 472, + 677 + ], + "spans": [ + { + "bbox": [ + 406, + 666, + 472, + 677 + ], + "type": "text", + "content": "Midjourney v6.1" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 119, + 81, + 240, + 94 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 81, + 240, + 94 + ], + "spans": [ + { + "bbox": [ + 119, + 81, + 240, + 94 + ], + "type": "text", + "content": "Text-to-Image Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 85, + 241, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 85, + 241, + 109 + ], + "spans": [ + { + "bbox": [ + 107, + 85, + 241, + 109 + ], + "type": "text", + "content": "Text-to-Image Generation (with complex text prompt)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 137, + 118, + 482, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 118, + 482, + 131 + ], + "spans": [ + { + "bbox": [ + 137, + 118, + 482, + 131 + ], + "type": "text", + "content": "Evaluation: Visual content precisely following the text instruction."
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 106, + 133, + 220, + 247 + ], + "blocks": [ + { + "bbox": [ + 106, + 133, + 220, + 247 + ], + "lines": [ + { + "bbox": [ + 106, + 133, + 220, + 247 + ], + "spans": [ + { + "bbox": [ + 106, + 133, + 220, + 247 + ], + "type": "image", + "image_path": "27fce9400300fb184ed43e1e4b4f8eb042ae16f63646b6eda324b41fff54aede.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 251, + 504, + 304 + ], + "lines": [ + { + "bbox": [ + 100, + 251, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 100, + 251, + 504, + 304 + ], + "type": "text", + "content": "Input Text: \"An icy landscape. A vast expanse of snow-covered mountain peaks stretches endlessly. Beneath them is a dense forest and a colossal frozen lake. Three people are boating in three boats separately in the lake. Not far from the lake, a volcano threatens eruption, its rumblings felt even from afar. Above, a ferocious red dragon dominates the sky and commands the heavens, fueled by the volcano's relentless energy flow.\" (Prompt from GenArtist)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 239, + 133, + 354, + 247 + ], + "blocks": [ + { + "bbox": [ + 239, + 133, + 354, + 247 + ], + "lines": [ + { + "bbox": [ + 239, + 133, + 354, + 247 + ], + "spans": [ + { + "bbox": [ + 239, + 133, + 354, + 247 + ], + "type": "image", + "image_path": "116d28e49cd90395347ff38e9ec8e1c04b768757b89b7a9a1d0a0f0f317b20f4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 373, + 133, + 488, + 247 + ], + "blocks": [ + { + "bbox": [ + 373, + 133, + 488, + 247 + ], + "lines": [ + { + "bbox": [ + 373, + 133, + 488, + 247 + ], + "spans": [ + { + "bbox": [ + 373, + 133, + 488, + 247 + ], + "type": "image", + "image_path": "bfaf1c18b294719962fa4fcaa9024d109045581572e81ebad1ebe756236b696a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 106, + 309, + 220, + 422 + ], + "blocks": [ + { + "bbox": [ + 106, + 309, + 220, + 422 + ], + "lines": [ + { + "bbox": [ + 106, + 309, + 220, + 422 + ], + "spans": [ + { + "bbox": [ + 106, + 309, + 220, + 422 + ], + "type": "image", + "image_path": "64e6d603fb4bb84b5ebce439ab2fdb3c6e0e566e8af8982c1c50d5378b6fe487.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 239, + 308, + 354, + 422 + ], + "blocks": [ + { + "bbox": [ + 239, + 308, + 354, + 422 + ], + "lines": [ + { + "bbox": [ + 239, + 308, + 354, + 422 + ], + "spans": [ + { + "bbox": [ + 239, + 308, + 354, + 422 + ], + "type": "image", + "image_path": "7ed09698c0927be9038e1132fc7243c7e628f5e5c041880670c7d1ac190273ad.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 373, + 308, + 487, + 422 + ], + "blocks": [ + { + "bbox": [ + 373, + 308, + 487, + 422 + ], + "lines": [ + { + "bbox": [ + 373, + 308, + 487, + 422 + ], + "spans": [ + { + "bbox": [ + 373, + 308, + 487, + 422 + ], + "type": "image", + "image_path": "484e31e49c1e7a2ebd3c367ae0db8fcd10716584cec8c8e06beaca0f6c7a0381.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 106, + 475, + 220, + 588 + ], + 
"blocks": [ + { + "bbox": [ + 106, + 475, + 220, + 588 + ], + "lines": [ + { + "bbox": [ + 106, + 475, + 220, + 588 + ], + "spans": [ + { + "bbox": [ + 106, + 475, + 220, + 588 + ], + "type": "image", + "image_path": "2de6dd439640e1a491cd2669646ca4deab67272994343185f41956a82e6a40a1.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 591, + 504, + 634 + ], + "lines": [ + { + "bbox": [ + 104, + 591, + 504, + 634 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 504, + 634 + ], + "type": "text", + "content": "Input Text: \"In a magical seascape, a majestic ship sails through crystal blue waters surrounded by vibrant marine life and soaring birds. Towering cliffs frame the scene, while a stunning rainbow arches across the sky, blending with ethereal clouds. This enchanting journey captures the serene beauty of nature's wonders.\" (Prompt from IterComp)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 239, + 474, + 354, + 589 + ], + "blocks": [ + { + "bbox": [ + 104, + 428, + 504, + 472 + ], + "lines": [ + { + "bbox": [ + 104, + 428, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 428, + 504, + 472 + ], + "type": "text", + "content": "Input Text: \"On the rooftop of a skyscraper in a bustling cyberpunk city, a figure in a trench coat and neon-lit visor stands amidst a garden of bio-luminescent plants, overlooking the maze of flying cars and towering holograms. Robotic birds flit among the foliage, digital billboards flash advertisements in the distance.\" (Prompt from IterComp)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 239, + 474, + 354, + 589 + ], + "lines": [ + { + "bbox": [ + 239, + 474, + 354, + 589 + ], + "spans": [ + { + "bbox": [ + 239, + 474, + 354, + 589 + ], + "type": "image", + "image_path": "0c0470a214490980a8b144c3e88a00976a14f32edb1762c91f3846a0c05ec5b2.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 373, + 474, + 487, + 588 + ], + "blocks": [ + { + "bbox": [ + 373, + 474, + 487, + 588 + ], + "lines": [ + { + "bbox": [ + 373, + 474, + 487, + 588 + ], + "spans": [ + { + "bbox": [ + 373, + 474, + 487, + 588 + ], + "type": "image", + "image_path": "745ba50ac275ead363177aa6a6389523a01b4c236062fe89cb966d79aeafe69b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 665, + 533, + 709 + ], + "lines": [ + { + "bbox": [ + 77, + 665, + 533, + 709 + ], + "spans": [ + { + "bbox": [ + 77, + 665, + 533, + 709 + ], + "type": "text", + "content": "Figure 3: Task: Compositional text-to-image generation. Evaluate the image-text alignment on complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Pro [51]. Observation: GPT-4o and FLUX can generate more harmonious and natural scene than Gemini 2.0 Flash." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 143, + 639, + 178, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 639, + 178, + 650 + ], + "spans": [ + { + "bbox": [ + 143, + 639, + 178, + 650 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 256, + 639, + 333, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 639, + 333, + 650 + ], + "spans": [ + { + "bbox": [ + 256, + 639, + 333, + 650 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 414, + 639, + 443, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 414, + 639, + 443, + 650 + ], + "spans": [ + { + "bbox": [ + 414, + 639, + 443, + 650 + ], + "type": "text", + "content": "FLUX" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 103, + 95, + 237, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 95, + 237, + 119 + ], + "spans": [ + { + "bbox": [ + 103, + 95, + 237, + 119 + ], + "type": "text", + "content": "Text-to-Image Generation (with complex text prompt)" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 121, + 125, + 133, + 139 + ], + "blocks": [ + { + "bbox": [ + 121, + 125, + 133, + 139 + ], + "lines": [ + { + "bbox": [ + 121, + 125, + 133, + 139 + ], + "spans": [ + { + "bbox": [ + 121, + 125, + 133, + 139 + ], + "type": "image", + "image_path": "30ed799a330abd36ec3b456792e50674b62123dda7d183a4909f198292186c06.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 136, + 129, + 482, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 129, + 482, + 141 + ], + "spans": [ + { + "bbox": [ + 136, + 129, + 482, + 141 + ], + "type": "text", + "content": "Evaluation: Visual content precisely following the text instruction." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 105, + 144, + 219, + 258 + ], + "blocks": [ + { + "bbox": [ + 105, + 144, + 219, + 258 + ], + "lines": [ + { + "bbox": [ + 105, + 144, + 219, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 219, + 258 + ], + "type": "image", + "image_path": "0a19bafce54aa9b28620506b9c6051449ff283c024b7b8bdde3dea6959a5f2ab.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 262, + 497, + 305 + ], + "lines": [ + { + "bbox": [ + 95, + 262, + 497, + 305 + ], + "spans": [ + { + "bbox": [ + 95, + 262, + 497, + 305 + ], + "type": "text", + "content": "Input Text: \"Under the luminous full moon, a serene Japanese garden with traditional pagodas and a tranquil pond creates a magical night scene. The soft glow from the lantern-lit buildings reflects on the water, blending nature and architecture in harmony.
The moonlight bathes the landscape, enhancing the peaceful ambiance.\" (Prompt from IterComp)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 241, + 145, + 354, + 258 + ], + "blocks": [ + { + "bbox": [ + 241, + 145, + 354, + 258 + ], + "lines": [ + { + "bbox": [ + 241, + 145, + 354, + 258 + ], + "spans": [ + { + "bbox": [ + 241, + 145, + 354, + 258 + ], + "type": "image", + "image_path": "1660f44ab6725c952d5b0bc43505f77c0d7cb4b552be04d6430ee2b25ec63d61.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 374, + 145, + 487, + 258 + ], + "blocks": [ + { + "bbox": [ + 374, + 145, + 487, + 258 + ], + "lines": [ + { + "bbox": [ + 374, + 145, + 487, + 258 + ], + "spans": [ + { + "bbox": [ + 374, + 145, + 487, + 258 + ], + "type": "image", + "image_path": "89e41678c8fcc68e299a5cf2931d67bb1f2339142bca0c32c1593b4888650519.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 121, + 312, + 204, + 434 + ], + "blocks": [ + { + "bbox": [ + 121, + 312, + 204, + 434 + ], + "lines": [ + { + "bbox": [ + 121, + 312, + 204, + 434 + ], + "spans": [ + { + "bbox": [ + 121, + 312, + 204, + 434 + ], + "type": "image", + "image_path": "f016fb55ce2f2b9f48a4d23f36970310500e1250abf50d5e88f1902507f424c6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 99, + 440, + 501, + 483 + ], + "lines": [ + { + "bbox": [ + 99, + 440, + 501, + 483 + ], + "spans": [ + { + "bbox": [ + 99, + 440, + 501, + 483 + ], + "type": "text", + "content": "Input Text: \"A Chinese general wearing a crown, with whiskers and golden Chinese style armor, standing with a majestic dragon head on his chest, symbolizing his strength, wearing black and gold boots. 
His appearance exudes a sense of authority, wisdom, and an unyielding spirit, embodying the ideal ancient Chinese hero.\" (Prompt from RPG)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 253, + 312, + 342, + 434 + ], + "blocks": [ + { + "bbox": [ + 253, + 312, + 342, + 434 + ], + "lines": [ + { + "bbox": [ + 253, + 312, + 342, + 434 + ], + "spans": [ + { + "bbox": [ + 253, + 312, + 342, + 434 + ], + "type": "image", + "image_path": "9077e20f899c60d5e1c26e1748dfa141980bf09da6c613467d42c1143bab34a1.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 384, + 312, + 476, + 434 + ], + "blocks": [ + { + "bbox": [ + 384, + 312, + 476, + 434 + ], + "lines": [ + { + "bbox": [ + 384, + 312, + 476, + 434 + ], + "spans": [ + { + "bbox": [ + 384, + 312, + 476, + 434 + ], + "type": "image", + "image_path": "c341891a5dd74ee440ca81c75da11adba9b78fd29a19903df5c98dbac7513e19.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 93, + 489, + 219, + 574 + ], + "blocks": [ + { + "bbox": [ + 93, + 489, + 219, + 574 + ], + "lines": [ + { + "bbox": [ + 93, + 489, + 219, + 574 + ], + "spans": [ + { + "bbox": [ + 93, + 489, + 219, + 574 + ], + "type": "image", + "image_path": "4569221e0b0de61258bfb9ba425fbed4cda37201c120d48edf92be4e3bff1d5c.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 99, + 581, + 501, + 624 + ], + "lines": [ + { + "bbox": [ + 99, + 581, + 501, + 624 + ], + "spans": [ + { + "bbox": [ + 99, + 581, + 501, + 624 + ], + "type": "text", + "content": "Input Text: \"A beautiful landscape with a river in the middle, the left of the river is in the evening and in the winter with a big iceberg and a small village while some people are skiing on the river and some people are skating, the right of the river is in the summer with a volcano in the morning and a small village while some people are playing.\" (Prompt from RPG)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 221, + 490, + 309, + 574 + ], + "blocks": [ + { + "bbox": [ + 221, + 490, + 309, + 574 + ], + "lines": [ + { + "bbox": [ + 221, + 490, + 309, + 574 + ], + "spans": [ + { + "bbox": [ + 221, + 490, + 309, + 574 + ], + "type": "image", + "image_path": "475d909f8bd94a4e962a1af7f117f1db9f76e1f2cd7047ccbdf158cec0cbc2b2.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 654, + 533, + 698 + ], + "lines": [ + { + "bbox": [ + 77, + 654, + 533, + 698 + ], + "spans": [ + { + "bbox": [ + 77, + 654, + 533, + 698 + ], + "type": "text", + "content": "Figure 4: Task: Compositional text-to-image generation. Evaluate the image-text alignment on complex compositions. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Pro [51]. Observation: GPT-4o struggles to generate culturally related elements and maintain boundary continuity (see rows 2 and 3), similar to Gemini 2.0 Flash and FLUX." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 309, + 489, + 399, + 574 + ], + "blocks": [ + { + "bbox": [ + 309, + 489, + 399, + 574 + ], + "lines": [ + { + "bbox": [ + 309, + 489, + 399, + 574 + ], + "spans": [ + { + "bbox": [ + 309, + 489, + 399, + 574 + ], + "type": "image", + "image_path": "ef1d1bd4564e7e63c547c011879fd964bcadd7553b251ad9931c404cc35e496b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 401, + 489, + 516, + 574 + ], + "blocks": [ + { + "bbox": [ + 401, + 489, + 516, + 574 + ], + "lines": [ + { + "bbox": [ + 401, + 489, + 516, + 574 + ], + "spans": [ + { + "bbox": [ + 401, + 489, + 516, + 574 + ], + "type": "image", + "image_path": "1086a817de2fda2fa73bc2221b337c167aa823711f8d87db081972a8c108577d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 135, + 630, + 170, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 630, + 170, + 641 + ], + "spans": [ + { + "bbox": [ + 135, + 630, + 170, + 641 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 264, + 630, + 341, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 630, + 341, + 641 + ], + "spans": [ + { + "bbox": [ + 264, + 630, + 341, + 641 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 442, + 628, + 470, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 442, + 628, + 470, + 639 + ], + "spans": [ + { + "bbox": [ + 442, + 628, + 470, + 639 + ], + "type": "text", + "content": "FLUX" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 179, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 179, + 85 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 179, + 85 + ], + "type": "text", + "content": "2.1.2 Text Rendering" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 533, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 533, + 125 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 533, + 125 + ], + "type": "text", + "content": "Text rendering aims to generate text (characters, sentences, or even paragraphs) on an image, with the text content usually guided by the input prompt. Previous models [27, 2] show good capability in generating short text (within 10 words, such as signs or short phrases), but their ability to generate long texts remains limited." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 129, + 532, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 129, + 532, + 174 + ], + "spans": [ + { + "bbox": [ + 77, + 129, + 532, + 174 + ], + "type": "text", + "content": "As shown in Figure 5, GPT-4o demonstrates comparable abilities to existing state-of-the-art (SOTA) baselines when generating short texts.
All methods except FLUX [51] perform well at rendering short text that follows the prompt. In this section, we primarily focus on long text rendering to examine whether GPT-4o can surpass these baselines for extended textual content." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 178, + 531, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 178, + 531, + 201 + ], + "spans": [ + { + "bbox": [ + 77, + 178, + 531, + 201 + ], + "type": "text", + "content": "We choose POSTA [12], Gemini 2.0 Flash [99], Ideogram 3.0 [2], and Playground-v3 [64] as the baselines because of their established capabilities in rendering longer texts. The results are shown in Figure 6 and Figure 7." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 206, + 334, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 206, + 334, + 217 + ], + "spans": [ + { + "bbox": [ + 77, + 206, + 334, + 217 + ], + "type": "text", + "content": "From these examples, we make the following key observations:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 226, + 531, + 342 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 104, + 226, + 529, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 226, + 529, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 226, + 529, + 269 + ], + "type": "text", + "content": "- GPT-4o's strength in long text generation: Compared with other baselines, GPT-4o demonstrates a superior ability to generate long, coherent text. In example 1 and example 3, GPT-4o produces detailed textual information with fewer than three characters generated incorrectly across more than 100 characters of text." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 274, + 531, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 274, + 531, + 305 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 531, + 305 + ], + "type": "text", + "content": "- Baseline limitations: When the input prompt becomes extremely long, models such as Gemini 2.0 Flash, Ideogram 3.0, and Playground-v3 often produce significantly more errors or render vague text patches that are difficult to recognize." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 310, + 530, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 310, + 530, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 310, + 530, + 342 + ], + "type": "text", + "content": "- POSTA's performance: As a model specifically designed for poster-style text generation, POSTA performs comparably to, and in some instances slightly more precisely than, GPT-4o. We hypothesize this is due to its multi-step pipeline tailored for long text rendering." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 77, + 353, + 532, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 353, + 532, + 376 + ], + "spans": [ + { + "bbox": [ + 77, + 353, + 532, + 376 + ], + "type": "text", + "content": "Overall, we conclude that GPT-4o excels at long text rendering, clearly outperforming most existing commercial models and delivering results on par with the latest specialized research models."
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 100, + 91, + 205, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 91, + 205, + 103 + ], + "spans": [ + { + "bbox": [ + 100, + 91, + 205, + 103 + ], + "type": "text", + "content": "Short Text Rendering" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 208, + 112, + 220, + 125 + ], + "blocks": [ + { + "bbox": [ + 208, + 112, + 220, + 125 + ], + "lines": [ + { + "bbox": [ + 208, + 112, + 220, + 125 + ], + "spans": [ + { + "bbox": [ + 208, + 112, + 220, + 125 + ], + "type": "image", + "image_path": "9d7e40286ddeb28b93be6916d8101d062aa91f512e1c1ac6b0ccc889ae647631.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 222, + 115, + 402, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 115, + 402, + 127 + ], + "spans": [ + { + "bbox": [ + 222, + 115, + 402, + 127 + ], + "type": "text", + "content": "Evaluation: Text Rendering Precision." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 97, + 129, + 181, + 215 + ], + "blocks": [ + { + "bbox": [ + 97, + 129, + 181, + 215 + ], + "lines": [ + { + "bbox": [ + 97, + 129, + 181, + 215 + ], + "spans": [ + { + "bbox": [ + 97, + 129, + 181, + 215 + ], + "type": "image", + "image_path": "08d9d7d0f4498fa74ab30be14fe461e1fd36819539ce56b0a1505126518de149.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 193, + 130, + 277, + 215 + ], + "blocks": [ + { + "bbox": [ + 193, + 130, + 277, + 215 + ], + "lines": [ + { + "bbox": [ + 193, + 130, + 277, + 215 + ], + "spans": [ + { + "bbox": [ + 193, + 130, + 277, + 215 + ], + "type": "image", + "image_path": "234feaba9c9e1d0f65d67e6df566f551271d9b6bc16b92cc18990362a5b21b2f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 288, + 130, + 373, + 215 + ], + "blocks": [ + { + "bbox": [ + 288, + 130, + 373, + 215 + ], + "lines": [ + { + "bbox": [ + 288, + 130, + 373, + 215 + ], + "spans": [ + { + "bbox": [ + 288, + 130, + 373, + 215 + ], + "type": "image", + "image_path": "0ce5a44bd3cd7500b4b78db75c0f1ccc81baa684ffa77ebdb728c28b0fe9e785.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 384, + 130, + 513, + 215 + ], + "blocks": [ + { + "bbox": [ + 384, + 130, + 513, + 215 + ], + "lines": [ + { + "bbox": [ + 384, + 130, + 513, + 215 + ], + "spans": [ + { + "bbox": [ + 384, + 130, + 513, + 215 + ], + "type": "image", + "image_path": "204fddddcbcd5e6532fa8831a9afe9f12c4bf879dee56a592fddb35d74766418.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 100, + 221, + 501, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 221, + 501, + 257 + ], + "spans": [ + { + "bbox": [ + 100, + 221, + 501, + 257 + ], + "type": "text", + "content": "Input Text: \"A beautiful painting of 
flowing colors and styles forming the words 'The GPT-4o/Ideogram/FLUX/SD3 research paper is nowhere!'. the background is speckled with drops and splashes of paint.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 97, + 262, + 154, + 347 + ], + "blocks": [ + { + "bbox": [ + 97, + 262, + 154, + 347 + ], + "lines": [ + { + "bbox": [ + 97, + 262, + 154, + 347 + ], + "spans": [ + { + "bbox": [ + 97, + 262, + 154, + 347 + ], + "type": "image", + "image_path": "5d4b6580226201ab5042c29511d6da5a3409b9f5c7fee1d2d9c074d14f64765b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 168, + 262, + 253, + 347 + ], + "blocks": [ + { + "bbox": [ + 168, + 262, + 253, + 347 + ], + "lines": [ + { + "bbox": [ + 168, + 262, + 253, + 347 + ], + "spans": [ + { + "bbox": [ + 168, + 262, + 253, + 347 + ], + "type": "image", + "image_path": "6ec2422a775f3f017131805ebc131bf30d8d32953e433ab7a7e35ce013d814bd.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 268, + 262, + 352, + 347 + ], + "blocks": [ + { + "bbox": [ + 268, + 262, + 352, + 347 + ], + "lines": [ + { + "bbox": [ + 268, + 262, + 352, + 347 + ], + "spans": [ + { + "bbox": [ + 268, + 262, + 352, + 347 + ], + "type": "image", + "image_path": "4f7ea6846a9f2e71ede47f66f8045aa234b77d2155c2f56eca68f95514f68e80.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 367, + 262, + 514, + 347 + ], + "blocks": [ + { + "bbox": [ + 367, + 262, + 514, + 347 + ], + "lines": [ + { + "bbox": [ + 367, + 262, + 514, + 347 + ], + "spans": [ + { + "bbox": [ + 367, + 262, + 514, + 347 + ], + "type": "image", + "image_path": "1b62b31f138cbdfe4ceec53b959cee36fc3c5f0c22524ab44623a5cd565ac9c8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 102, + 354, + 478, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 354, + 478, + 378 + ], + "spans": [ + { + "bbox": [ + 102, + 354, + 478, + 378 + ], + "type": "text", + "content": "Input Text: \"Beautiful pixel art of a Wizard with hovering text 'Achievement unlocked: Diffusion models can spell now'.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 97, + 385, + 154, + 469 + ], + "blocks": [ + { + "bbox": [ + 97, + 385, + 154, + 469 + ], + "lines": [ + { + "bbox": [ + 97, + 385, + 154, + 469 + ], + "spans": [ + { + "bbox": [ + 97, + 385, + 154, + 469 + ], + "type": "image", + "image_path": "4ba3110052c7723a701e2ccf5e847ad8c21956fcb34a978c1e55cce340d0131c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 651, + 532, + 707 + ], + "lines": [ + { + "bbox": [ + 77, + 651, + 532, + 707 + ], + "spans": [ + { + "bbox": [ + 77, + 651, + 532, + 707 + ], + "type": "text", + "content": "Figure 5: Task: Short text rendering. Generate prompt-aligned, concise textual content (typically within 10 words) on an image. Setup: Each sample is produced based on a guiding text prompt. Comparisons are made with prior SOTA models [27, 2] and FLUX [51]. Observations: GPT-4o achieves performance on par with existing SOTA baselines in rendering short texts, consistently following the prompt with minimal errors. All evaluated methods—except FLUX [51]—deliver high-fidelity results in this setting." 
+ } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 173, + 385, + 307, + 469 + ], + "blocks": [ + { + "bbox": [ + 173, + 385, + 307, + 469 + ], + "lines": [ + { + "bbox": [ + 173, + 385, + 307, + 469 + ], + "spans": [ + { + "bbox": [ + 173, + 385, + 307, + 469 + ], + "type": "image", + "image_path": "4fed5bdfe6b3d24177ff8f96422315e81b3ae864616ce0f21770566bf7fd8891.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 326, + 385, + 410, + 469 + ], + "blocks": [ + { + "bbox": [ + 326, + 385, + 410, + 469 + ], + "lines": [ + { + "bbox": [ + 326, + 385, + 410, + 469 + ], + "spans": [ + { + "bbox": [ + 326, + 385, + 410, + 469 + ], + "type": "image", + "image_path": "83fabbd535e803f344bff99d3d53a6c8a71d95998a6a377d7bae9a19ebb0830a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 430, + 385, + 514, + 469 + ], + "blocks": [ + { + "bbox": [ + 430, + 385, + 514, + 469 + ], + "lines": [ + { + "bbox": [ + 430, + 385, + 514, + 469 + ], + "spans": [ + { + "bbox": [ + 430, + 385, + 514, + 469 + ], + "type": "image", + "image_path": "63a0fda286dbcf6aab1f70694ab0576b25d5fd247827bdfa027aca3ce2984b08.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 102, + 478, + 480, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 478, + 480, + 490 + ], + "spans": [ + { + "bbox": [ + 102, + 478, + 480, + 490 + ], + "type": "text", + "content": "Input Text: \"A monkey holding a sign reading 'Scaling transformer models is awesome'.\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 97, + 500, + 181, + 585 + ], + "blocks": [ + { + "bbox": [ + 97, + 500, + 181, + 585 + ], + "lines": [ + { + "bbox": [ + 97, + 500, + 181, + 585 + ], + "spans": [ + { + "bbox": [ + 97, + 500, + 181, + 585 + ], + "type": "image", + "image_path": "b44926116e89a328041a0572f04bcac45c5be77916af431b03c4a0ab99dd761f.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 187, + 500, + 271, + 585 + ], + "blocks": [ + { + "bbox": [ + 187, + 500, + 271, + 585 + ], + "lines": [ + { + "bbox": [ + 187, + 500, + 271, + 585 + ], + "spans": [ + { + "bbox": [ + 187, + 500, + 271, + 585 + ], + "type": "image", + "image_path": "cb9fbff8e79609bd0c0397e9ff8bdd26c4934e0c043bf90309793e247c53acde.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 277, + 500, + 361, + 585 + ], + "blocks": [ + { + "bbox": [ + 277, + 500, + 361, + 585 + ], + "lines": [ + { + "bbox": [ + 277, + 500, + 361, + 585 + ], + "spans": [ + { + "bbox": [ + 277, + 500, + 361, + 585 + ], + "type": "image", + "image_path": "ecba2242bac62c8676019c58e03ec1fc8ba9f66e7431827caf98cf3565ae7429.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 368, + 501, + 512, + 585 + ], + "blocks": [ + { + "bbox": [ + 368, + 501, + 512, + 585 + ], + "lines": [ + { + "bbox": [ + 368, + 501, + 512, + 585 + ], + "spans": [ + { + "bbox": [ + 368, + 501, + 512, + 585 + ], + "type": "image", + "image_path": "eb143f4361a33ce174515f230c11a216ac7590d56a2fc328a2adc862245f0ce7.jpg" + } + ] + } + ], 
+ "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 101, + 590, + 492, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 590, + 492, + 613 + ], + "spans": [ + { + "bbox": [ + 101, + 590, + 492, + 613 + ], + "type": "text", + "content": "Input Text: \"A surreal and humorous scene in a classroom with the words 'GPUs go brrrrr' written in white chalk on a blackboard. In front of the blackboard.\"" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 123, + 621, + 154, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 621, + 154, + 632 + ], + "spans": [ + { + "bbox": [ + 123, + 621, + 154, + 632 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 200, + 621, + 258, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 621, + 258, + 633 + ], + "spans": [ + { + "bbox": [ + 200, + 621, + 258, + 633 + ], + "type": "text", + "content": "Ideogram 3.0" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 306, + 621, + 332, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 621, + 332, + 632 + ], + "spans": [ + { + "bbox": [ + 306, + 621, + 332, + 632 + ], + "type": "text", + "content": "FLUX" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 429, + 621, + 451, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 429, + 621, + 451, + 632 + ], + "spans": [ + { + "bbox": [ + 429, + 621, + 451, + 632 + ], + "type": "text", + "content": "SD3" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 229, + 101, + 394, + 112 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 101, + 394, + 112 + ], + "spans": [ + { + "bbox": [ + 229, + 101, + 394, + 112 + ], + "type": "text", + "content": "Evaluation: Text Rendering Precision." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 111, + 116, + 196, + 243 + ], + "blocks": [ + { + "bbox": [ + 111, + 116, + 196, + 243 + ], + "lines": [ + { + "bbox": [ + 111, + 116, + 196, + 243 + ], + "spans": [ + { + "bbox": [ + 111, + 116, + 196, + 243 + ], + "type": "image", + "image_path": "afce16d9cf439bcdc04fa94f8ca4f2227195b2cf36f175b299fa1b98664ddbfa.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 139, + 247, + 169, + 256 + ], + "lines": [ + { + "bbox": [ + 139, + 247, + 169, + 256 + ], + "spans": [ + { + "bbox": [ + 139, + 247, + 169, + 256 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 197, + 116, + 283, + 243 + ], + "blocks": [ + { + "bbox": [ + 197, + 116, + 283, + 243 + ], + "lines": [ + { + "bbox": [ + 197, + 116, + 283, + 243 + ], + "spans": [ + { + "bbox": [ + 197, + 116, + 283, + 243 + ], + "type": "image", + "image_path": "c6b354d42faa888d692b35bdfbc5cd4599018152cf0064da6cd13e513960b86e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 226, + 247, + 257, + 256 + ], + "lines": [ + { + "bbox": [ + 226, + 247, + 257, + 256 + ], + "spans": [ + { + "bbox": [ + 226, + 247, + 257, + 256 + ], + "type": "text", + "content": "POSTA" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 284, + 116, + 370, + 243 + ], + "blocks": [ + { + "bbox": [ + 284, + 116, + 370, + 243 + ], + "lines": [ + { + "bbox": [ + 284, + 116, + 370, + 243 + ], + "spans": [ + { + "bbox": [ + 284, + 116, + 370, + 243 + ], + "type": "image", + "image_path": "f4cd9c8e039a64fce1592ca2bb40e8f9f9bf8f8c530f65984c76ff6708a91c6f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 293, + 247, + 359, + 256 + ], + "lines": [ + { + "bbox": [ + 293, + 247, + 359, + 256 + ], + "spans": [ + { + "bbox": [ + 293, + 247, + 359, + 256 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 370, + 116, + 497, + 243 + ], + "blocks": [ + { + "bbox": [ + 370, + 116, + 497, + 243 + ], + "lines": [ + { + "bbox": [ + 370, + 116, + 497, + 243 + ], + "spans": [ + { + "bbox": [ + 370, + 116, + 497, + 243 + ], + "type": "image", + "image_path": "c5582fe68220a5a384efa294ef35989ebef4760bd5b2897bd0ee23ceffe344a0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 247, + 460, + 258 + ], + "lines": [ + { + "bbox": [ + 405, + 247, + 460, + 258 + ], + "spans": [ + { + "bbox": [ + 405, + 247, + 460, + 258 + ], + "type": "text", + "content": "Ideogram 3.0" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 259, + 165, + 269 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 259, + 165, + 269 + ], + "spans": [ + { + "bbox": [ + 115, + 259, + 165, + 269 + ], + "type": "text", + "content": "Input Text:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 270, + 495, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 270, + 495, + 290 + ], + "spans": [ + { + "bbox": [ + 115, + 270, + 495, + 290 + ], + "type": "text", + "content": "\"Generate a movie poster with a 
sci-fi space theme, a solitary figure standing on an alien planet, facing a massive outpost." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 290, + 270, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 290, + 270, + 300 + ], + "spans": [ + { + "bbox": [ + 115, + 290, + 270, + 300 + ], + "type": "text", + "content": "The poster displays the following text:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 300, + 211, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 300, + 211, + 310 + ], + "spans": [ + { + "bbox": [ + 115, + 300, + 211, + 310 + ], + "type": "text", + "content": "Title: The Last Outpost" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 310, + 298, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 310, + 298, + 320 + ], + "spans": [ + { + "bbox": [ + 115, + 310, + 298, + 320 + ], + "type": "text", + "content": "Subtitle: When the stars fall, the truth rises" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 321, + 169, + 330 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 321, + 169, + 330 + ], + "spans": [ + { + "bbox": [ + 115, + 321, + 169, + 330 + ], + "type": "text", + "content": "Information:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 331, + 225, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 331, + 225, + 341 + ], + "spans": [ + { + "bbox": [ + 115, + 331, + 225, + 341 + ], + "type": "text", + "content": "Produced by Jackson Ward" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 342, + 206, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 342, + 206, + 351 + ], + "spans": [ + { + "bbox": [ + 115, + 342, + 206, + 351 + ], + "type": "text", + "content": "Music by Aria Calloway" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 351, + 227, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 351, + 227, + 361 + ], + "spans": [ + { + "bbox": [ + 115, + 351, + 227, + 361 + ], + "type": "text", + "content": "Screenplay by Elena Sharpe" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 115, + 361, + 227, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 361, + 227, + 372 + ], + "spans": [ + { + "bbox": [ + 115, + 361, + 227, + 372 + ], + "type": "text", + "content": "Directed By Sylvia Hartman" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 372, + 487, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 372, + 487, + 411 + ], + "spans": [ + { + "bbox": [ + 115, + 372, + 487, + 411 + ], + "type": "text", + "content": "\"A visually stunning and narratively gripping exploration of the unknown. The Last Outpost masterfully blends elements of science fiction, mystery, and psychological thriller, creating a hauntingly atmospheric journey that will leave audiences on the edge of their seats.\" -- Global Film Review\"." 
+ } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 111, + 415, + 194, + 540 + ], + "blocks": [ + { + "bbox": [ + 111, + 415, + 194, + 540 + ], + "lines": [ + { + "bbox": [ + 111, + 415, + 194, + 540 + ], + "spans": [ + { + "bbox": [ + 111, + 415, + 194, + 540 + ], + "type": "image", + "image_path": "e1dc82e6233835aef84e7c0d62fd27535b3b4aee7f6bc4beab765dacdbbdc4c1.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 139, + 547, + 168, + 556 + ], + "lines": [ + { + "bbox": [ + 139, + 547, + 168, + 556 + ], + "spans": [ + { + "bbox": [ + 139, + 547, + 168, + 556 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 195, + 415, + 282, + 541 + ], + "blocks": [ + { + "bbox": [ + 195, + 415, + 282, + 541 + ], + "lines": [ + { + "bbox": [ + 195, + 415, + 282, + 541 + ], + "spans": [ + { + "bbox": [ + 195, + 415, + 282, + 541 + ], + "type": "image", + "image_path": "59f2bdbb4c18ff5b2eaf1af8462de2d557b98afb5403da1a76ec1f639e6017a6.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 226, + 547, + 257, + 556 + ], + "lines": [ + { + "bbox": [ + 226, + 547, + 257, + 556 + ], + "spans": [ + { + "bbox": [ + 226, + 547, + 257, + 556 + ], + "type": "text", + "content": "POSTA" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 282, + 415, + 377, + 541 + ], + "blocks": [ + { + "bbox": [ + 282, + 415, + 377, + 541 + ], + "lines": [ + { + "bbox": [ + 282, + 415, + 377, + 541 + ], + "spans": [ + { + "bbox": [ + 282, + 415, + 377, + 541 + ], + "type": "image", + "image_path": "3c2edc361f25ec06384618fcf44a118efef67ed2c14c3a957eec1c7c432abf66.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 293, + 547, + 359, + 556 + ], + "lines": [ + { + "bbox": [ + 293, + 547, + 359, + 556 + ], + "spans": [ + { + "bbox": [ + 293, + 547, + 359, + 556 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 378, + 415, + 504, + 541 + ], + "blocks": [ + { + "bbox": [ + 378, + 415, + 504, + 541 + ], + "lines": [ + { + "bbox": [ + 378, + 415, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 378, + 415, + 504, + 541 + ], + "type": "image", + "image_path": "1751faef203209061a6fae572da8f65fff10f9f74ef783748441d19f7f4a2be9.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 547, + 460, + 557 + ], + "lines": [ + { + "bbox": [ + 405, + 547, + 460, + 557 + ], + "spans": [ + { + "bbox": [ + 405, + 547, + 460, + 557 + ], + "type": "text", + "content": "Ideogram 3.0" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 657, + 533, + 734 + ], + "lines": [ + { + "bbox": [ + 77, + 657, + 533, + 734 + ], + "spans": [ + { + "bbox": [ + 77, + 657, + 533, + 734 + ], + "type": "text", + "content": "Figure 6: Task: Long text rendering. Generate extended, coherent, and prompt-consistent textual content on an image. Setup: Evaluations are conducted against advanced baselines including POSTA [12], Gemini 2.0 Flash [99], Ideogram 3.0 [2], and Playground-v3 [64]. 
Observations: GPT-4o excels in long text rendering by producing coherent, detailed textual information with very few character errors. In contrast, models like Gemini 2.0 Flash, Ideogram 3.0, and Playground-v3 often exhibit increased errors or generate vague text when faced with lengthy prompts, while POSTA's tailored multi-step pipeline sometimes yields competitive precision. Overall, GPT-4o outperforms most commercial models and rivals specialized research approaches in extended text generation." + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 559, + 162, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 559, + 162, + 569 + ], + "spans": [ + { + "bbox": [ + 113, + 559, + 162, + 569 + ], + "type": "text", + "content": "Input Text:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 112, + 570, + 490, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 570, + 490, + 610 + ], + "spans": [ + { + "bbox": [ + 112, + 570, + 490, + 610 + ], + "type": "text", + "content": "\"Create a poster with the theme of a Journey of Solitude. The background should depict a lone figure walking toward an unusable form of transportation. The scene should evoke a sense of being lost, helplessness, and desolation, capturing the emotional weight of losing oneself in a barren, unforgiving landscape." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 610, + 209, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 610, + 209, + 621 + ], + "spans": [ + { + "bbox": [ + 113, + 610, + 209, + 621 + ], + "type": "text", + "content": "Title: Solitary Journeys" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 113, + 621, + 194, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 621, + 194, + 630 + ], + "spans": [ + { + "bbox": [ + 113, + 621, + 194, + 630 + ], + "type": "text", + "content": "Subtitle: Elara Voss" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 631, + 345, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 631, + 345, + 640 + ], + "spans": [ + { + "bbox": [ + 113, + 631, + 345, + 640 + ], + "type": "text", + "content": "Information: WANDERING THROUGH THE UNKNOWN\"." 
+ } + ] + } + ], + "index": 33 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 79, + 220, + 92 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 79, + 220, + 92 + ], + "spans": [ + { + "bbox": [ + 127, + 79, + 220, + 92 + ], + "type": "text", + "content": "Long Text Rendering" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 123, + 231, + 218, + 244 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 231, + 218, + 244 + ], + "spans": [ + { + "bbox": [ + 123, + 231, + 218, + 244 + ], + "type": "text", + "content": "Long Text Rendering" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 212, + 251, + 225, + 264 + ], + "blocks": [ + { + "bbox": [ + 212, + 251, + 225, + 264 + ], + "lines": [ + { + "bbox": [ + 212, + 251, + 225, + 264 + ], + "spans": [ + { + "bbox": [ + 212, + 251, + 225, + 264 + ], + "type": "image", + "image_path": "072dabf79d619daa8eeb4cd2f8867859c2a5525ca4ceff1ee839637ee46f829f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 225, + 254, + 396, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 254, + 396, + 266 + ], + "spans": [ + { + "bbox": [ + 225, + 254, + 396, + 266 + ], + "type": "text", + "content": "Evaluation: Text Rendering Precision." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 102, + 269, + 188, + 399 + ], + "blocks": [ + { + "bbox": [ + 102, + 269, + 188, + 399 + ], + "lines": [ + { + "bbox": [ + 102, + 269, + 188, + 399 + ], + "spans": [ + { + "bbox": [ + 102, + 269, + 188, + 399 + ], + "type": "image", + "image_path": "37c0d9d0ba1eed41834771a2d8df3690d2b37294a08a5844702161e53aa817ab.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 129, + 404, + 160, + 414 + ], + "lines": [ + { + "bbox": [ + 129, + 404, + 160, + 414 + ], + "spans": [ + { + "bbox": [ + 129, + 404, + 160, + 414 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 119, + 554, + 488, + 566 + ], + "lines": [ + { + "bbox": [ + 119, + 554, + 488, + 566 + ], + "spans": [ + { + "bbox": [ + 119, + 554, + 488, + 566 + ], + "type": "text", + "content": "Figure 7: Task: Long text rendering. The Setup and Observations are the same as Figure 6." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 189, + 269, + 278, + 399 + ], + "blocks": [ + { + "bbox": [ + 189, + 269, + 278, + 399 + ], + "lines": [ + { + "bbox": [ + 189, + 269, + 278, + 399 + ], + "spans": [ + { + "bbox": [ + 189, + 269, + 278, + 399 + ], + "type": "image", + "image_path": "61b9d5a9c147c446387301c4f13acd7ceeccc06f653954410c1dc398d1fd4bc3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 217, + 404, + 249, + 414 + ], + "lines": [ + { + "bbox": [ + 217, + 404, + 249, + 414 + ], + "spans": [ + { + "bbox": [ + 217, + 404, + 249, + 414 + ], + "type": "text", + "content": "POSTA" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 278, + 269, + 376, + 399 + ], + "blocks": [ + { + "bbox": [ + 278, + 269, + 376, + 399 + ], + "lines": [ + { + "bbox": [ + 278, + 269, + 376, + 399 + ], + "spans": [ + { + "bbox": [ + 278, + 269, + 376, + 399 + ], + "type": "image", + "image_path": "96c1a55c7c5f312a71c89d34f708b0e4f8da4556922ffdee41d673b76ea82dad.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 293, + 404, + 361, + 415 + ], + "lines": [ + { + "bbox": [ + 293, + 404, + 361, + 415 + ], + "spans": [ + { + "bbox": [ + 293, + 404, + 361, + 415 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 377, + 269, + 507, + 399 + ], + "blocks": [ + { + "bbox": [ + 377, + 269, + 507, + 399 + ], + "lines": [ + { + "bbox": [ + 377, + 269, + 507, + 399 + ], + "spans": [ + { + "bbox": [ + 377, + 269, + 507, + 399 + ], + "type": "image", + "image_path": "faf4eca5a3720dff227ce6a4ad8f4898de848e4e08fec90adf87952d14446f7e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 418, + 404, + 477, + 415 + ], + "lines": [ + { + "bbox": [ + 418, + 404, + 477, + 415 + ], + "spans": [ + { + "bbox": [ + 418, + 404, + 477, + 415 + ], + "type": "text", + "content": "Playground-v3" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 426, + 159, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 426, + 159, + 436 + ], + "spans": [ + { + "bbox": [ + 107, + 426, + 159, + 436 + ], + "type": "text", + "content": "Input Text:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 437, + 485, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 437, + 485, + 468 + ], + "spans": [ + { + "bbox": [ + 107, + 437, + 485, + 468 + ], + "type": "text", + "content": "\"Please generate an artistic and stylized promotional poster. The style is an artistic painting style. The theme is about nature and city. The poster displays the following information: Title: Fragmented Harmony" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 468, + 328, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 468, + 328, + 479 + ], + "spans": [ + { + "bbox": [ + 107, + 468, + 328, + 479 + ], + "type": "text", + "content": "Subtitle Between the steel and sky, life finds its way." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 479, + 498, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 479, + 498, + 532 + ], + "spans": [ + { + "bbox": [ + 106, + 479, + 498, + 532 + ], + "type": "text", + "content": "Information: Amid the towering strucions and the quiet persistence of nature, a delicate balance emerges. The complex and often contradictory relationship between urban development and the natural world reveals itself in fleeting moments of harmony. Though fragmented, life continues, threading its way through the shadows of progress. Here, conflict and coexistence form an intricate dance--sometimes at odds, sometimes in unexpected unity\"." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 207, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 207, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 207, + 83 + ], + "type": "text", + "content": "2.1.3 Document Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 533, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 533, + 125 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 533, + 125 + ], + "type": "text", + "content": "We also explore a novel task: document image generation with GPT-4o, comparing its performance with Gemini 2.0 Flash [99] and Playground-v3 [64]. As shown in Figure 8 - 10, GPT-4o produces document images with cleaner layouts and more consistent content." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 140, + 240, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 140, + 240, + 152 + ], + "spans": [ + { + "bbox": [ + 104, + 140, + 240, + 152 + ], + "type": "text", + "content": "Document Image Generation" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 205, + 161, + 218, + 175 + ], + "blocks": [ + { + "bbox": [ + 205, + 161, + 218, + 175 + ], + "lines": [ + { + "bbox": [ + 205, + 161, + 218, + 175 + ], + "spans": [ + { + "bbox": [ + 205, + 161, + 218, + 175 + ], + "type": "image", + "image_path": "7ff25ac80d044220739d61284585a070ee9142eede21fe8dbf47dff3cb2ffa9c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 219, + 164, + 398, + 176 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 164, + 398, + 176 + ], + "spans": [ + { + "bbox": [ + 219, + 164, + 398, + 176 + ], + "type": "text", + "content": "Evaluation: Text Rendering Precision." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 202, + 203, + 211 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 202, + 203, + 211 + ], + "spans": [ + { + "bbox": [ + 115, + 202, + 203, + 211 + ], + "type": "text", + "content": "Attention Is All You Need" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 113, + 219, + 208, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 219, + 208, + 236 + ], + "spans": [ + { + "bbox": [ + 113, + 219, + 208, + 236 + ], + "type": "text", + "content": "Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Lilion Jones Aidan N.Gomez Lukasz Kaiser IIIa Polosukhin" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 149, + 246, + 170, + 252 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 246, + 170, + 252 + ], + "spans": [ + { + "bbox": [ + 149, + 246, + 170, + 252 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 97, + 254, + 225, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 254, + 225, + 328 + ], + "spans": [ + { + "bbox": [ + 97, + 254, + 225, + 328 + ], + "type": "text", + "content": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on tw machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 247, + 256, + 307, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 256, + 307, + 262 + ], + "spans": [ + { + "bbox": [ + 247, + 256, + 307, + 262 + ], + "type": "text", + "content": "Attention Is All You Need" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 243, + 262, + 347, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 262, + 347, + 329 + ], + "spans": [ + { + "bbox": [ + 243, + 262, + 347, + 329 + ], + "type": "text", + "content": "Ashish Vourakis, NM Nazarov, NJI Parmar, Jbab Udekoreli, Lillion Jones, Adiinil S. W. Coomce, Lakota Kiber, Pala Poslashov, and M. A. D. G. R. Smith. 2017. The neural network models are based on recurrent or conventional neural networks in an encoder-decoder loss. The best produced models also connect the encoder and decoder loss through an attention mechanism. As, we propose a new study mechanism for a machine, the Transformer, based attention mechanisms, dispensing with the same weight as the encoder-decoder loss. The results of the training tasks show these models to be superior in quality while being more parsibilized and requiring significantly less time to time to move train. 
Our model achieves 84.2 BLUE in the WMT-50 Go-to-translation task, which is comparable to the performance of our previous work [3]. In addition, our WMT-30 Pragmatic task, our model established a new single-model state-of-the-art-state-of-the-art BLUE score of 41.8 when " + }, + { + "bbox": [ + 243, + 262, + 347, + 329 + ], + "type": "inline_equation", + "content": "\\alpha = 5" + }, + { + "bbox": [ + 243, + 262, + 347, + 329 + ], + "type": "text", + "content": " training for 3.5 days on GPUs after fraction of the training costs of the best models from literature. We propose Transformer generalizes well by applying it successfully to English syntacticity parsing both with large and limited training data." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 369, + 181, + 432, + 187 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 181, + 432, + 187 + ], + "spans": [ + { + "bbox": [ + 369, + 181, + 432, + 187 + ], + "type": "text", + "content": "Attention Is All You Need" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 369, + 191, + 424, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 191, + 424, + 213 + ], + "spans": [ + { + "bbox": [ + 369, + 191, + 424, + 213 + ], + "type": "text", + "content": "Ashish A. [Al] You. Wea a nono' Ainon 1 \nAshish Yawani, Naqadzaree, laokotri \nAlok Uzzotner, Anokalika Sanik, Jokoslav adar, Gosak III Ploosukhaini \nAlok Uzzotner, Anokalika Sanik" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 369, + 222, + 507, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 222, + 507, + 331 + ], + "spans": [ + { + "bbox": [ + 369, + 222, + 507, + 331 + ], + "type": "text", + "content": "Antext, exotocic sequecra transoedscnncs on cbr be caes baccared on bracococcyne nort bell netiabon an ecocycclion, an ecocyclon. TeTrane: the ensonnnmnsn neeepnckian. Epipcnie rile kceely on meenctiny As adeterdiencr. \nnpopioors sonr tonarwamchim. I mtnr. vorti. inenpoea a dedusum minnyss onomcrh. cortordone.lora ontata tose or uin hoperiosper. 
\nThe rorner is s oovl dtt maive de acemnccnodkdu aleu cormunb-dlr bing dvl-ndr 016de and mechance \n11 Dae nucnccnng \nceso nucnse \n12 - eannnr er on attonne amehnes asnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnncnnccnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 143, + 338, + 178, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 338, + 178, + 348 + ], + "spans": [ + { + "bbox": [ + 143, + 338, + 178, + 348 + ], + "type": "text", + "content": "GPT40" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 255, + 338, + 334, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 338, + 334, + 350 + ], + "spans": [ + { + "bbox": [ + 255, + 338, + 334, + 350 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 406, + 337, + 473, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 337, + 473, + 350 + ], + "spans": [ + { + "bbox": [ + 406, + 337, + 473, + 350 + ], + "type": "text", + "content": "Playground-v3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 99, + 355, + 159, + 367 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 355, + 159, + 367 + ], + "spans": [ + { + "bbox": [ + 99, + 355, + 159, + 367 + ], + "type": "text", + "content": "Input Text:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 97, + 369, + 487, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 369, + 487, + 396 + ], + "spans": [ + { + "bbox": [ + 97, + 369, + 487, + 396 + ], + "type": "text", + "content": "\"Generate A realistic screenshot of the first page of the Paper from the following information:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 97, + 399, + 252, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 399, + 252, + 411 + ], + "spans": [ + { + "bbox": [ + 97, + 399, + 252, + 411 + ], + "type": "text", + "content": "Title: Attention Is All You Need" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 97, + 414, + 510, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 414, + 510, + 441 + ], + "spans": [ + { + "bbox": [ + 97, + 414, + 510, + 441 + ], + "type": "text", + "content": "Author List: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 97, + 444, + 509, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 444, + 509, + 653 + ], + "spans": [ + { + "bbox": [ + 97, + 444, + 509, + 653 + ], + "type": "text", + "content": "Abstract: The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. 
Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.\"" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 77, + 667, + 533, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 667, + 533, + 700 + ], + "spans": [ + { + "bbox": [ + 77, + 667, + 533, + 700 + ], + "type": "text", + "content": "Figure 8: Task: Document image generation. Setup: Each row shows a text prompt and the generated outputs from GPT-4o, Gemini 2.0 Flash [99], and Playground-v3 [64]. Observation: GPT-4o can generate more consistent and accurate font and format than the other two models." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 119, + 239, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 119, + 239, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 119, + 239, + 133 + ], + "type": "text", + "content": "Document Image Generation" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 205, + 140, + 217, + 154 + ], + "blocks": [ + { + "bbox": [ + 205, + 140, + 217, + 154 + ], + "lines": [ + { + "bbox": [ + 205, + 140, + 217, + 154 + ], + "spans": [ + { + "bbox": [ + 205, + 140, + 217, + 154 + ], + "type": "image", + "image_path": "e960d89a9332326f5fac58239c1d5784d9e237d7ccede93ea1c82462b3f589b3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 218, + 143, + 398, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 143, + 398, + 156 + ], + "spans": [ + { + "bbox": [ + 218, + 143, + 398, + 156 + ], + "type": "text", + "content": "Evaluation: Text Rendering Precision." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 99, + 178, + 232, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 178, + 232, + 198 + ], + "spans": [ + { + "bbox": [ + 99, + 178, + 232, + 198 + ], + "type": "text", + "content": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 206, + 205, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 206, + 205, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 206, + 205, + 220 + ], + "type": "text", + "content": "Jacob Devlin Ming-Wei Chang Kenton Lee Kristina Toutanova" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 154, + 230, + 176, + 237 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 230, + 176, + 237 + ], + "spans": [ + { + "bbox": [ + 154, + 230, + 176, + 237 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 97, + 239, + 235, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 239, + 235, + 290 + ], + "spans": [ + { + "bbox": [ + 97, + 239, + 235, + 290 + ], + "type": "text", + "content": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result; the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 97, + 290, + 237, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 290, + 237, + 319 + ], + "spans": [ + { + "bbox": [ + 97, + 290, + 237, + 319 + ], + "type": "text", + "content": "BERT is conceptually simple and empirically powerful. 
It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to " + }, + { + "bbox": [ + 97, + 290, + 237, + 319 + ], + "type": "inline_equation", + "content": "80.5\\%" + }, + { + "bbox": [ + 97, + 290, + 237, + 319 + ], + "type": "text", + "content": " (7.7% point absolute improvement), MultiNLI accuracy to " + }, + { + "bbox": [ + 97, + 290, + 237, + 319 + ], + "type": "inline_equation", + "content": "86.7\\%" + }, + { + "bbox": [ + 97, + 290, + 237, + 319 + ], + "type": "text", + "content": " (4.6% absolute improvement), SQAud v1.1 question answering Test F1 to 93.2 (1.5 point absolute" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 148, + 324, + 183, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 324, + 183, + 335 + ], + "spans": [ + { + "bbox": [ + 148, + 324, + 183, + 335 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 255, + 178, + 357, + 196 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 178, + 357, + 196 + ], + "spans": [ + { + "bbox": [ + 255, + 178, + 357, + 196 + ], + "type": "text", + "content": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 257, + 198, + 354, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 198, + 354, + 210 + ], + "spans": [ + { + "bbox": [ + 257, + 198, + 354, + 210 + ], + "type": "text", + "content": "Ashlor Jacob Doslin Ming Wei Chang, Kenton Lee Abstrut Win, Karinlin Touranosa" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 255, + 210, + 360, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 210, + 360, + 254 + ], + "spans": [ + { + "bbox": [ + 255, + 210, + 360, + 254 + ], + "type": "text", + "content": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Cryostatnet as Transformer. Unlike recnec mean, BERT is designed by pre-train deep bidirectional representations from nonshaded text by jasily selfconforming on both text 1st&4, xavier coint eorect, tate 1st&2 to win 1st, sacp0-twincent BERT model can be fine mused with just one additional output layer to create of dafve for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 255, + 259, + 361, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 259, + 361, + 297 + ], + "spans": [ + { + "bbox": [ + 255, + 259, + 361, + 297 + ], + "type": "text", + "content": "BERT is conceptually simple and empirically powerful. It obtains now state-of-the-art results on eleven evertweaven multimoment language processing tasks, including GLUE score to " + }, + { + "bbox": [ + 255, + 259, + 361, + 297 + ], + "type": "inline_equation", + "content": "80.5\\%" + }, + { + "bbox": [ + 255, + 259, + 361, + 297 + ], + "type": "text", + "content": " (7.69 absolute improvement), pushing the GLUE alloselect improvement).
MulaNLI accuracy to " + }, + { + "bbox": [ + 255, + 259, + 361, + 297 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{M}} = 0.1" + }, + { + "bbox": [ + 255, + 259, + 361, + 297 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 255, + 259, + 361, + 297 + ], + "type": "inline_equation", + "content": "\\nu_{\\mathrm{L}} < \\varepsilon_{\\mathrm{M}} / \\varepsilon_{\\mathrm{M}}" + }, + { + "bbox": [ + 255, + 259, + 361, + 297 + ], + "type": "text", + "content": " (1.5 point absolute improvement) and SQoAD v2.0 Test F1 to 83.1 (3.1 point absolute improvement)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 268, + 325, + 347, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 325, + 347, + 336 + ], + "spans": [ + { + "bbox": [ + 268, + 325, + 347, + 336 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 380, + 182, + 504, + 196 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 182, + 504, + 196 + ], + "spans": [ + { + "bbox": [ + 380, + 182, + 504, + 196 + ], + "type": "text", + "content": "BERT: Pre-training on Lepi Bidellar Tansson Translons for Language Understond mderance litting' cetting from t cowf henvaming" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 380, + 198, + 502, + 217 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 380, + 198, + 399, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 198, + 399, + 202 + ], + "spans": [ + { + "bbox": [ + 380, + 198, + 399, + 202 + ], + "type": "text", + "content": "Author, List:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 380, + 203, + 502, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 203, + 502, + 213 + ], + "spans": [ + { + "bbox": [ + 380, + 203, + 502, + 213 + ], + "type": "text", + "content": "J Asbad Devlin, Yiw Changuaguagaa Kionn age rspgectangane cans \n" + }, + { + "bbox": [ + 380, + 203, + 502, + 213 + ], + "type": "inline_equation", + "content": "^{a}" + }, + { + "bbox": [ + 380, + 203, + 502, + 213 + ], + "type": "text", + "content": "pressin liKistcn-Toutanfa" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 380, + 213, + 421, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 213, + 421, + 217 + ], + "spans": [ + { + "bbox": [ + 380, + 213, + 421, + 217 + ], + "type": "text", + "content": "represeons-Uintanlvania" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 380, + 218, + 396, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 218, + 396, + 224 + ], + "spans": [ + { + "bbox": [ + 380, + 218, + 396, + 224 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 380, + 227, + 511, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 227, + 511, + 260 + ], + "spans": [ + { + "bbox": [ + 380, + 227, + 511, + 260 + ], + "type": "text", + "content": "We introduce a new languagegretrovetercendentiale monoclin klonstionist monole conBldfecarstaadss reprenters from nemer raje Sfiflnonanecones. desessnissrall ranauagaleafdelfe xyn unming on hnlaesaeare two ploddes also the por-entant canteletory a state-vwraon one-on-one coffice of anisotropy, and the ploso-syntropic colective of states. Of s2s212310 pain or questionbmporansuansluangene Tcnf?f to ingest ingf Sf10 tto 46 w. 
(I, test),marshersonalizne imnance immmens" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 380, + 268, + 512, + 319 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 380, + 268, + 490, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 268, + 490, + 274 + ], + "spans": [ + { + "bbox": [ + 380, + 268, + 490, + 274 + ], + "type": "text", + "content": "BERT is conceptually imlilienenarplenpholcft-nu surate-fine-ams" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 380, + 274, + 504, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 274, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 380, + 274, + 504, + 289 + ], + "type": "text", + "content": "ronen 1 oonranaalwauanu viipopluoteforocnirandinns inget caught anourage, vovlurvulgina for nain. 2004. The use of the word \"sulfur\" in the text is a question envoing SUIF u697." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 380, + 289, + 512, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 289, + 512, + 300 + ], + "spans": [ + { + "bbox": [ + 380, + 289, + 512, + 300 + ], + "type": "text", + "content": "- GBFEscors/aanoreqquasurf and Squad w.10 aninlvalte 83.7% 4.6% (X) \n- TeST fto onop:11.3% (x)" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 380, + 303, + 511, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 303, + 511, + 319 + ], + "spans": [ + { + "bbox": [ + 380, + 303, + 511, + 319 + ], + "type": "text", + "content": "→ BERT is cponconuynlyrsnaintally pocefine-at-ut: ouvah176. (JET v.c.37% quinting anguen linyuH-aCLS sccorts onoonssthe sonea 4000A/AresoVc LEAU pioiHcB: gnrmaeh an epesrourinans A7c)0v o.o35 aed 1170" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 414, + 324, + 481, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 414, + 324, + 481, + 338 + ], + "spans": [ + { + "bbox": [ + 414, + 324, + 481, + 338 + ], + "type": "text", + "content": "Playground-v3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 99, + 349, + 158, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 349, + 158, + 361 + ], + "spans": [ + { + "bbox": [ + 99, + 349, + 158, + 361 + ], + "type": "text", + "content": "Input Text:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 97, + 362, + 483, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 362, + 483, + 389 + ], + "spans": [ + { + "bbox": [ + 97, + 362, + 483, + 389 + ], + "type": "text", + "content": "Generate A realistic screenshot of the first page of the Paper from the following information:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 97, + 392, + 449, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 392, + 449, + 422 + ], + "spans": [ + { + "bbox": [ + 97, + 392, + 449, + 422 + ], + "type": "text", + "content": "Title: BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 97, + 423, + 511, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 423, + 511, + 555 + ], + "spans": [ + { + "bbox": [ + 97, + 423, + 511, + 555 + ], + "type": "text", + "content": "Author List: Jacob Devlin, Ming-Wei Chang, Kenton Lee, Kristina Toutanova \nAbstract: We introduce a new language representation model called BERT, which stands for Bidirectional Encoder 
Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 97, + 558, + 511, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 558, + 511, + 632 + ], + "spans": [ + { + "bbox": [ + 97, + 558, + 511, + 632 + ], + "type": "text", + "content": "BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to " + }, + { + "bbox": [ + 97, + 558, + 511, + 632 + ], + "type": "inline_equation", + "content": "80.5\\%" + }, + { + "bbox": [ + 97, + 558, + 511, + 632 + ], + "type": "text", + "content": " (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 108, + 663, + 500, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 663, + 500, + 676 + ], + "spans": [ + { + "bbox": [ + 108, + 663, + 500, + 676 + ], + "type": "text", + "content": "Figure 9: Task: Document image generation. The Setup and Observations are the same as Fig. 8." + } + ] + } + ], + "index": 32 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 205, + 121, + 217, + 137 + ], + "blocks": [ + { + "bbox": [ + 205, + 121, + 217, + 137 + ], + "lines": [ + { + "bbox": [ + 205, + 121, + 217, + 137 + ], + "spans": [ + { + "bbox": [ + 205, + 121, + 217, + 137 + ], + "type": "image", + "image_path": "9c2265499ec9f6a0c6e61a8914311c8b94d20424cd7a53a126ce5f30f3f6bf33.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 219, + 125, + 398, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 125, + 398, + 138 + ], + "spans": [ + { + "bbox": [ + 219, + 125, + 398, + 138 + ], + "type": "text", + "content": "Evaluation: Text Rendering Precision." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 168, + 211, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 168, + 211, + 198 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 211, + 198 + ], + "type": "text", + "content": "You Only Look Once: Unified, Real-Time Object Detection" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 205, + 220, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 205, + 220, + 217 + ], + "spans": [ + { + "bbox": [ + 107, + 205, + 220, + 217 + ], + "type": "text", + "content": "Joseph Redmon, Santosh Divvala, Ross Girshick Ali Farhadi" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 153, + 226, + 174, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 226, + 174, + 232 + ], + "spans": [ + { + "bbox": [ + 153, + 226, + 174, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 102, + 232, + 230, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 232, + 230, + 274 + ], + "spans": [ + { + "bbox": [ + 102, + 232, + 230, + 274 + ], + "type": "text", + "content": "We present YOLO, a new approach to object detection. Prior work on object detection repurposes classifiers to perform detection. Instead, we frame object detection as a regression problem to spatially separated bounding boxes and associated class probabilities. A single neural network predicts bounding boxes and class probabilities directly from full images in one evaluation. Since the whole detection pipeline is a single network, it can be optimized end-to-end directly on detection performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 102, + 274, + 230, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 274, + 230, + 307 + ], + "spans": [ + { + "bbox": [ + 102, + 274, + 230, + 307 + ], + "type": "text", + "content": "Our unified architecture is extremely fast. Our base YOLO model processes images in real-time at 45 frames per second. A smaller version of the network. Fast YOLO, processes an astounding 155 frames per second while still achieving double the mAP of other real-time detectors. Compared to state-of-the-art detection systems YOLO makes more localization errors but is far less likely to pred" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 254, + 171, + 355, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 171, + 355, + 185 + ], + "spans": [ + { + "bbox": [ + 254, + 171, + 355, + 185 + ], + "type": "text", + "content": "You Only Look Once: Unlimited, Time/Indirect Decciption \nAuthor:Lesser Jowesh Redmonial Siptedthi Adri Farhad" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 253, + 186, + 381, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 186, + 381, + 205 + ], + "spans": [ + { + "bbox": [ + 253, + 186, + 381, + 205 + ], + "type": "text", + "content": "We present YJLO, a new approach to object detection. Driver search uses oleejeon cieeuses to represen or correct deferential finwication. 
Istegedel, when farstial, we confirmed anapase, agate trius to a signafal or sialipal staphylococcal preprobed boing boos summarilyour heartbodn frontal nater and the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the 100% of the" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 253, + 206, + 304, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 206, + 304, + 210 + ], + "spans": [ + { + "bbox": [ + 253, + 206, + 304, + 210 + ], + "type": "text", + "content": "Acatat Rechun-Yorim Bogae lo rojctc filocly" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 253, + 213, + 381, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 213, + 381, + 234 + ], + "spans": [ + { + "bbox": [ + 253, + 213, + 381, + 234 + ], + "type": "text", + "content": "Our undersonged tandoce boe + ant - or cemoe - aepocemis in a times, bodingly narmabogus haarban. Jnci enwecstic ancoontie fluy fast YOLO pue moebes, 45f rans perennetioles unperenentio- dorensis in reel thane reobexes petarges. ectcylcfom oene trsoute of sucrose princeia of docuta. d. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 255, + 236, + 384, + 273 + ], + "blocks": [ + { + "bbox": [ + 255, + 236, + 384, + 273 + ], + "lines": [ + { + "bbox": [ + 255, + 236, + 384, + 273 + ], + "spans": [ + { + "bbox": [ + 255, + 236, + 384, + 273 + ], + "type": "table", + "html": "
Abstr. LOC nomencl.
FascicristinFas1/3
MADS-eos-miR-fc::rec_mucmuc
fli-2 (C9-03)fli-25p438%
#868E1 (b-cd)#868ap33<%
#868E1 (b-cd, c-d)#868ap33<%
#868E1 (b-cd, d-c)#868ap33<%
#868E1 (b-cd, e-c)#868ap33<%
#868E1 (b-cd, f-c)#868ap33<%
#868E1 (b-cd, g-c)#868ap33<%
#868E1 (b-cd, h-c)#868ap33<%
#868E1 (b-cd, i-c)#868ap33<%
#868E1 (b-cd, j-c)#868ap33<%
", + "image_path": "5789a11c88dd67fe3ecf18424e18808df9be15ba558dca4d60893646f7ed02f2.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 253, + 274, + 381, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 274, + 381, + 285 + ], + "spans": [ + { + "bbox": [ + 253, + 274, + 381, + 285 + ], + "type": "text", + "content": "aAldofoi, aS; atalfoi: all other anatolian oboforators of otects. 1 oxtinct nands, aHs: all extinctions, for foxtroomes sereis (outcited) I am Alstomos, Rictus sp. to theft, to rott and aftall to sell otles, or licee, and so on." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 253, + 289, + 381, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 289, + 381, + 308 + ], + "spans": [ + { + "bbox": [ + 253, + 289, + 381, + 308 + ], + "type": "text", + "content": "```bash\n#renatIdack Corrgend to state-vtextction system to objects. CN in All and Chon\nPectmon Is dendrites into Commute on ooclastin or Donr Tnp, eutment the\nimagery mod fringes to arf the Articn on A mitronin or aortothetotnoid\nned\n#recognition VOLCOVOLD, Cogenture: GcGmCunlty: VOLCOVOLD" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 148, + 314, + 183, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 314, + 183, + 325 + ], + "spans": [ + { + "bbox": [ + 148, + 314, + 183, + 325 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 276, + 315, + 354, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 315, + 354, + 326 + ], + "spans": [ + { + "bbox": [ + 276, + 315, + 354, + 326 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 397, + 158, + 491, + 174 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 158, + 491, + 174 + ], + "spans": [ + { + "bbox": [ + 397, + 158, + 491, + 174 + ], + "type": "text", + "content": "You Only Look Once: Unified, Real-Time Object Detection" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 397, + 175, + 492, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 175, + 492, + 185 + ], + "spans": [ + { + "bbox": [ + 397, + 175, + 492, + 185 + ], + "type": "text", + "content": "Joseph Redmon, Santosh Dlwala, Ross Girishk, Ali Farhedi Abstract" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 397, + 186, + 511, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 186, + 511, + 225 + ], + "spans": [ + { + "bbox": [ + 397, + 186, + 511, + 225 + ], + "type": "text", + "content": "We presentYOLO, a new approach to object detection. Prior work on object detection epires classifiers to perform detection. Instead, we frame object detection as a regression problem to spatially separated bounding beves and associated class probabilities. A single neural network, predicts brednis bounding boxes and class reliabilities directly from full evaluation. Since the whole detection pipeline is a singkwork, it can be optimised end-to-end directly on detection performance." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 397, + 230, + 507, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 230, + 507, + 291 + ], + "spans": [ + { + "bbox": [ + 397, + 230, + 507, + 291 + ], + "type": "text", + "content": "Our unified architecture is extremely fast. 
Our base VOLO model precursors images in real-time at 45 frames per second. A smaller version of trieva, Faat VOLO, processes an aetounding 155 frames per second. ¥8 frames per second while clll achieving double the MAP of real-time detectors. Compared is sisl-site detection systems VOLO makes mark deterrms. VOLO makes more lecdiscipli predict fasse ertris to ter en ptiplicit false detections where nothing exists. Finally, VOLO, VOLO lesrs vs every revalur representations of objects all other detection methods, including DPN and R-CNN, by a wide when generalizing from natural images to artwork artwork on both on the Picasso Dataset and the People-Art Dataset." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 421, + 313, + 488, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 421, + 313, + 488, + 326 + ], + "spans": [ + { + "bbox": [ + 421, + 313, + 488, + 326 + ], + "type": "text", + "content": "Playground-v3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 99, + 338, + 158, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 338, + 158, + 350 + ], + "spans": [ + { + "bbox": [ + 99, + 338, + 158, + 350 + ], + "type": "text", + "content": "Input Text:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 97, + 352, + 486, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 352, + 486, + 378 + ], + "spans": [ + { + "bbox": [ + 97, + 352, + 486, + 378 + ], + "type": "text", + "content": "\"Generate A realistic screenshot of the first page of the Paper from the following information:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 97, + 382, + 400, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 382, + 400, + 396 + ], + "spans": [ + { + "bbox": [ + 97, + 382, + 400, + 396 + ], + "type": "text", + "content": "Title: You Only Look Once: Unified, Real-Time Object Detection" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 97, + 397, + 438, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 397, + 438, + 410 + ], + "spans": [ + { + "bbox": [ + 97, + 397, + 438, + 410 + ], + "type": "text", + "content": "Author List: Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 97, + 412, + 509, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 412, + 509, + 514 + ], + "spans": [ + { + "bbox": [ + 97, + 412, + 509, + 514 + ], + "type": "text", + "content": "Abstract: We present YOLO, a new approach to object detection. Prior work on object detection repurposes classifiers to perform detection. Instead, we frame object detection as a regression problem to spatially separated bounding boxes and associated class probabilities. A single neural network predicts bounding boxes and class probabilities directly from full images in one evaluation. Since the whole detection pipeline is a single network, it can be optimized end-to-end directly on detection performance." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 97, + 517, + 504, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 517, + 504, + 650 + ], + "spans": [ + { + "bbox": [ + 97, + 517, + 504, + 650 + ], + "type": "text", + "content": "Our unified architecture is extremely fast. Our base YOLO model processes images in real-time at 45 frames per second. 
A smaller version of the network, Fast YOLO, processes an astounding 155 frames per second while still achieving double the mAP of other real-time detectors. Compared to state-of-the-art detection systems, YOLO makes more localization errors but is far less likely to predict false detections where nothing exists. Finally, YOLO learns very general representations of objects. It outperforms all other detection methods, including DPM and R-CNN, by a wide margin when generalizing from natural images to artwork on both the Picasso Dataset and the People-Art Dataset.\"" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 681, + 503, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 681, + 503, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 503, + 694 + ], + "type": "text", + "content": "Figure 10: Task: Document image generation. The Setup and Observations are the same as Fig. 8." + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 104, + 101, + 239, + 114 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 101, + 239, + 114 + ], + "spans": [ + { + "bbox": [ + 104, + 101, + 239, + 114 + ], + "type": "text", + "content": "Document Image Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 235, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 235, + 84 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 235, + 84 + ], + "type": "text", + "content": "2.1.4 Panorama Image Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 534, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 534, + 191 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 534, + 191 + ], + "type": "text", + "content": "Panorama image generation aims at creating a 360-degree view of a static scene, enabling immersive and comprehensive visual experiences. In our experiments, we select Pano-SD [119] and Gemini 2.0 Flash [99] as the baselines, with representative results illustrated in Figure 11. The comparisons reveal that while the baseline models can generate coherent panorama-like images with seamlessly connectable left and right sides, GPT-4o struggles to produce a true panorama. In most cases, GPT-4o generates images that approximate a panoramic view but still fall short in ensuring the necessary continuity across the image boundaries. We attribute this limitation to the insufficient representation of panorama images in its training data, as well as a predisposition towards generating images with a higher vertical aspect ratio rather than a wider one. Consequently, in the realm of panorama image generation, GPT-4o is inferior to the existing baseline models." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 112, + 209, + 247, + 222 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 209, + 247, + 222 + ], + "spans": [ + { + "bbox": [ + 112, + 209, + 247, + 222 + ], + "type": "text", + "content": "Panorama Image Generation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 219, + 230, + 392, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 230, + 392, + 245 + ], + "spans": [ + { + "bbox": [ + 219, + 230, + 392, + 245 + ], + "type": "text", + "content": "Evaluation: Is panorama image?" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 93, + 250, + 206, + 326 + ], + "blocks": [ + { + "bbox": [ + 93, + 250, + 206, + 326 + ], + "lines": [ + { + "bbox": [ + 93, + 250, + 206, + 326 + ], + "spans": [ + { + "bbox": [ + 93, + 250, + 206, + 326 + ], + "type": "image", + "image_path": "1371bb3d27b320f551f1aebcf4ba83faae804c2369cc85b124cb8a6b9e8b9ec5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 211, + 251, + 362, + 326 + ], + "blocks": [ + { + "bbox": [ + 211, + 251, + 362, + 326 + ], + "lines": [ + { + "bbox": [ + 211, + 251, + 362, + 326 + ], + "spans": [ + { + "bbox": [ + 211, + 251, + 362, + 326 + ], + "type": "image", + "image_path": "470abd7050918de412b06326cdd51c4cece55e61b2888569447852d1b87f591d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 367, + 251, + 518, + 326 + ], + "blocks": [ + { + "bbox": [ + 367, + 251, + 518, + 326 + ], + "lines": [ + { + "bbox": [ + 367, + 251, + 518, + 326 + ], + "spans": [ + { + "bbox": [ + 367, + 251, + 518, + 326 + ], + "type": "image", + "image_path": "56f24172af8797ef15310df874ee2c02e4edcdae14624400e6196e1a2638ef82.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 97, + 330, + 514, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 330, + 514, + 354 + ], + "spans": [ + { + "bbox": [ + 97, + 330, + 514, + 354 + ], + "type": "text", + "content": "Input Text: \"Please generate a panorama image: A living room with hardwood floors, a fireplace, and large windows.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 93, + 357, + 206, + 433 + ], + "blocks": [ + { + "bbox": [ + 93, + 357, + 206, + 433 + ], + "lines": [ + { + "bbox": [ + 93, + 357, + 206, + 433 + ], + "spans": [ + { + "bbox": [ + 93, + 357, + 206, + 433 + ], + "type": "image", + "image_path": "0771e85cb3485940ea65217af54563edb5368d1ed7f290c2f9c5ad8bdccee8ee.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 605, + 534, + 682 + ], + "lines": [ + { + "bbox": [ + 77, + 605, + 534, + 682 + ], + "spans": [ + { + "bbox": [ + 77, + 605, + 534, + 682 + ], + "type": "text", + "content": "Figure 11: Task: Panorama image generation, aiming to create immersive 360-degree views of static scenes. Setup: We compare GPT-4o with established baselines such as Pano-SD [119] and Gemini 2.0 Flash [99] to evaluate the generation of coherent panoramic images. Observations: While the baseline models reliably produce panoramas with seamlessly connected left and right sides, GPT-4o tends to only approximate a panoramic view and struggles to maintain continuity across image boundaries. 
This shortfall is likely due to limited panorama image representation in its training data and a tendency to generate images with a higher vertical aspect ratio rather than a wider one, rendering it inferior to the baselines in this task." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 211, + 357, + 361, + 433 + ], + "blocks": [ + { + "bbox": [ + 211, + 357, + 361, + 433 + ], + "lines": [ + { + "bbox": [ + 211, + 357, + 361, + 433 + ], + "spans": [ + { + "bbox": [ + 211, + 357, + 361, + 433 + ], + "type": "image", + "image_path": "d163c0b8a05c2d83cbe87b0c94311aadf8a7505417a3639fcd47509e36995332.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 365, + 357, + 518, + 433 + ], + "blocks": [ + { + "bbox": [ + 365, + 357, + 518, + 433 + ], + "lines": [ + { + "bbox": [ + 365, + 357, + 518, + 433 + ], + "spans": [ + { + "bbox": [ + 365, + 357, + 518, + 433 + ], + "type": "image", + "image_path": "eba5be0e2a33e5791d2df2eae6ba493b1daf01dd71d6fba92adff187a344d471.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 97, + 436, + 501, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 436, + 501, + 459 + ], + "spans": [ + { + "bbox": [ + 97, + 436, + 501, + 459 + ], + "type": "text", + "content": "Input Text: \"Please generate a panorama image: A cozy study with built-in bookshelves and a leather.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 93, + 464, + 204, + 539 + ], + "blocks": [ + { + "bbox": [ + 93, + 464, + 204, + 539 + ], + "lines": [ + { + "bbox": [ + 93, + 464, + 204, + 539 + ], + "spans": [ + { + "bbox": [ + 93, + 464, + 204, + 539 + ], + "type": "image", + "image_path": "f735b915cfa2ba7a85ad7998eea453cc54fd14b2e904d305b4cea528c2185f2a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 209, + 464, + 361, + 539 + ], + "blocks": [ + { + "bbox": [ + 209, + 464, + 361, + 539 + ], + "lines": [ + { + "bbox": [ + 209, + 464, + 361, + 539 + ], + "spans": [ + { + "bbox": [ + 209, + 464, + 361, + 539 + ], + "type": "image", + "image_path": "05f5665f542cca655067ebe6c202394a79f20e88be3af6125a418ad1ddb38552.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 365, + 464, + 518, + 539 + ], + "blocks": [ + { + "bbox": [ + 365, + 464, + 518, + 539 + ], + "lines": [ + { + "bbox": [ + 365, + 464, + 518, + 539 + ], + "spans": [ + { + "bbox": [ + 365, + 464, + 518, + 539 + ], + "type": "image", + "image_path": "ab89160a98b1c9923175245ecb1b778b6c8bedaa2a414c4e3a82178ef3554b45.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 97, + 543, + 483, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 543, + 483, + 567 + ], + "spans": [ + { + "bbox": [ + 97, + 543, + 483, + 567 + ], + "type": "text", + "content": "Input Text: \"Please generate a panorama image: A bedroom with a ceiling fan, gray walls, hardwood floors, a bed, and a TV on the wall.\"" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 131, + 573, + 165, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 573, + 165, + 584 + ], + "spans": [ + { + "bbox": [ + 
131, + 573, + 165, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 573, + 165, + 584 + ], + "spans": [ + { + "bbox": [ + 131, + 573, + 165, + 584 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 247, + 573, + 320, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 573, + 320, + 584 + ], + "spans": [ + { + "bbox": [ + 247, + 573, + 320, + 584 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 421, + 573, + 460, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 421, + 573, + 460, + 584 + ], + "spans": [ + { + "bbox": [ + 421, + 573, + 460, + 584 + ], + "type": "text", + "content": "Pano-SD" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 198, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 198, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 198, + 83 + ], + "type": "text", + "content": "2.2 Image-to-Image Tasks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 78, + 92, + 173, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 92, + 173, + 104 + ], + "spans": [ + { + "bbox": [ + 78, + 92, + 173, + 104 + ], + "type": "text", + "content": "2.2.1 Style Transfer" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 111, + 533, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 111, + 533, + 233 + ], + "spans": [ + { + "bbox": [ + 76, + 111, + 533, + 233 + ], + "type": "text", + "content": "Style transfer is a classic yet evolving task in computer vision, aiming to render an image in a specific artistic style while preserving the original content. It bridges the domains of vision and art, enabling applications such as digital artwork creation, film post-production, and virtual reality environment design. An early approach [33] used convolutional neural networks to separate and recombine content and style representations from images. This seminal work enabled the artistic stylization of photographs by optimizing pixel values to match a desired style. To improve efficiency, Johnson et al. [47] proposed feed-forward networks for real-time style transfer using perceptual losses. Later methods such as AdaIN [43] and WCT [57] enabled arbitrary style transfer without retraining for each new style. Transformer-based models like StyTr² [23] have been introduced to enhance style transfer quality and better preserve structural details. More recently, with the rapid development of image synthesis techniques, especially diffusion models, style transfer has seen further advancements in both quality and controllability. However, transferring specific artistic styles still typically requires a non-trivial amount of training data."
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 236, + 533, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 236, + 533, + 293 + ], + "spans": [ + { + "bbox": [ + 76, + 236, + 533, + 293 + ], + "type": "text", + "content": "To comprehensively evaluate the style transfer capability of GPT-4o, we conduct comparisons against several recent competitive models, including Gemini 2.0 Flash [99] and Midjourney v6.1 [75]. Specifically, Figure 12 illustrates style transfer results for natural scenes, while Figure 13 focuses on human facial images. Across a diverse range of styles, such as Monet, Van Gogh, Pixar, Cyberpunk, Snoopy, Disney, Ghibli, and Cubism, GPT-4o demonstrates consistently superior performance in both stylistic fidelity and content preservation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 296, + 533, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 296, + 533, + 364 + ], + "spans": [ + { + "bbox": [ + 76, + 296, + 533, + 364 + ], + "type": "text", + "content": "Notably, in the case of Ghibli style transfer, GPT-4o exhibits remarkable fidelity to the original artistic aesthetics, closely resembling the target style with vivid color palettes and soft contours. In contrast, both Gemini and Midjourney often produce inconsistent visual styles and textures. Furthermore, GPT-4o excels at preserving fine-grained content details, such as facial structure, earrings, clothing, and hairstyles, which are often misrepresented or lost in the outputs of other models. These results suggest that GPT-4o not only captures high-level style semantics but also maintains strong spatial consistency and semantic alignment." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 99, + 167, + 201, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 167, + 201, + 179 + ], + "spans": [ + { + "bbox": [ + 99, + 167, + 201, + 179 + ], + "type": "text", + "content": "Prompted Stylization" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 222, + 177, + 381, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 177, + 381, + 192 + ], + "spans": [ + { + "bbox": [ + 222, + 177, + 381, + 192 + ], + "type": "text", + "content": "Evaluation: Consistency/style." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 94, + 196, + 197, + 265 + ], + "blocks": [ + { + "bbox": [ + 94, + 196, + 197, + 265 + ], + "lines": [ + { + "bbox": [ + 94, + 196, + 197, + 265 + ], + "spans": [ + { + "bbox": [ + 94, + 196, + 197, + 265 + ], + "type": "image", + "image_path": "0fe2326eaaff906398f84779d9f2f8a9b656dcf843adbe8b635b0ebccc3b64c1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 198, + 196, + 306, + 263 + ], + "blocks": [ + { + "bbox": [ + 198, + 196, + 306, + 263 + ], + "lines": [ + { + "bbox": [ + 198, + 196, + 306, + 263 + ], + "spans": [ + { + "bbox": [ + 198, + 196, + 306, + 263 + ], + "type": "image", + "image_path": "41dc891dc5a7c3692e3d168e6b8d7aeecf6ee926ceb54032fac4c7482b335993.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 195, + 417, + 263 + ], + "blocks": [ + { + "bbox": [ + 307, + 195, + 417, + 263 + ], + "lines": [ + { + "bbox": [ + 307, + 195, + 417, + 263 + ], + "spans": [ + { + "bbox": [ + 307, + 195, + 417, + 263 + ], + "type": "image", + "image_path": "275a5a4359f968af20b14db343e03cc70f52a75880ac223fb413656701f9803b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 418, + 195, + 520, + 263 + ], + "blocks": [ + { + "bbox": [ + 418, + 195, + 520, + 263 + ], + "lines": [ + { + "bbox": [ + 418, + 195, + 520, + 263 + ], + "spans": [ + { + "bbox": [ + 418, + 195, + 520, + 263 + ], + "type": "image", + "image_path": "87edc272818725170e7b6db73459109f3c3967fc70ae14b1e2acffc417d4290d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 187, + 265, + 428, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 265, + 428, + 276 + ], + "spans": [ + { + "bbox": [ + 187, + 265, + 428, + 276 + ], + "type": "text", + "content": "Input Text: \"Generate the Monet style of this picture.\"" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 94, + 277, + 197, + 346 + ], + "blocks": [ + { + "bbox": [ + 94, + 277, + 197, + 346 + ], + "lines": [ + { + "bbox": [ + 94, + 277, + 197, + 346 + ], + "spans": [ + { + "bbox": [ + 94, + 277, + 197, + 346 + ], + "type": "image", + "image_path": "750636006f7b07b0af11afa452e7037a2d9a06614ababcd014be1498c416fd65.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 198, + 277, + 306, + 345 + ], + "blocks": [ + { + "bbox": [ + 198, + 277, + 306, + 345 + ], + "lines": [ + { + "bbox": [ + 198, + 277, + 306, + 345 + ], + "spans": [ + { + "bbox": [ + 198, + 277, + 306, + 345 + ], + "type": "image", + "image_path": "1ccf7d1613f12e0b81fba30f86e01357733d489b633304f1597188ed4f258e67.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 307, + 277, + 417, + 345 + ], + "blocks": [ + { + "bbox": [ + 307, + 277, + 417, + 345 + ], + "lines": [ + { + "bbox": [ + 307, + 277, + 417, + 345 + ], + "spans": [ + { + "bbox": [ + 307, + 277, + 417, + 345 + ], + "type": "image", + "image_path": "a8a15f935780009ae4408165cc5d5abf2965efa5cead6b31539ab6f6570babf3.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ 
+ 418, + 277, + 520, + 345 + ], + "blocks": [ + { + "bbox": [ + 418, + 277, + 520, + 345 + ], + "lines": [ + { + "bbox": [ + 418, + 277, + 520, + 345 + ], + "spans": [ + { + "bbox": [ + 418, + 277, + 520, + 345 + ], + "type": "image", + "image_path": "577ab214cdb8de2d03a7a2be9b878ed69afb604d28c0d21381d8da756093e973.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 185, + 347, + 440, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 347, + 440, + 357 + ], + "spans": [ + { + "bbox": [ + 185, + 347, + 440, + 357 + ], + "type": "text", + "content": "Input Text: \"Generate the Van Gogh style of this picture.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 94, + 359, + 197, + 427 + ], + "blocks": [ + { + "bbox": [ + 94, + 359, + 197, + 427 + ], + "lines": [ + { + "bbox": [ + 94, + 359, + 197, + 427 + ], + "spans": [ + { + "bbox": [ + 94, + 359, + 197, + 427 + ], + "type": "image", + "image_path": "c464a9f6f3f24ab0f65f5488c132ccea546a4af9ede651088ae6e7a2961516ec.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 553, + 533, + 631 + ], + "lines": [ + { + "bbox": [ + 77, + 553, + 533, + 631 + ], + "spans": [ + { + "bbox": [ + 77, + 553, + 533, + 631 + ], + "type": "text", + "content": "Figure 12: Task: Style transfer, aiming to render an image in a specific artistic style while preserving the original content. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and Midjourney v6.1 [75] on natural scene style transfer across multiple artistic domains. Observations: GPT-4o exhibits significantly better content preservation compared to Midjourney v6.1, maintaining fine-grained content details and structural consistency. In terms of style, it faithfully adheres to the textual description, effectively rendering vivid color palettes and soft contours that characterize the target style. This alignment notably surpasses both Gemini 2.0 Flash and Midjourney v6.1, highlighting GPT-4o's strong capabilities in preserving content and faithfully rendering diverse styles." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 198, + 359, + 306, + 426 + ], + "blocks": [ + { + "bbox": [ + 198, + 359, + 306, + 426 + ], + "lines": [ + { + "bbox": [ + 198, + 359, + 306, + 426 + ], + "spans": [ + { + "bbox": [ + 198, + 359, + 306, + 426 + ], + "type": "image", + "image_path": "67dd368a9fc83144d81f03926e3e616fb1c6241004b301cf8c36f99e0c45dd8d.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 307, + 359, + 417, + 425 + ], + "blocks": [ + { + "bbox": [ + 307, + 359, + 417, + 425 + ], + "lines": [ + { + "bbox": [ + 307, + 359, + 417, + 425 + ], + "spans": [ + { + "bbox": [ + 307, + 359, + 417, + 425 + ], + "type": "image", + "image_path": "badd5fb657f99aaab26d4d7e7efa36a6fb517d65faf566ed3fe8c5ae8f6f35d8.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 418, + 359, + 519, + 425 + ], + "blocks": [ + { + "bbox": [ + 418, + 359, + 519, + 425 + ], + "lines": [ + { + "bbox": [ + 418, + 359, + 519, + 425 + ], + "spans": [ + { + "bbox": [ + 418, + 359, + 519, + 425 + ], + "type": "image", + "image_path": "598045ae955aa43906186813c0efdb19e703aeb859e6842ca9badf105529c011.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 188, + 427, + 424, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 427, + 424, + 438 + ], + "spans": [ + { + "bbox": [ + 188, + 427, + 424, + 438 + ], + "type": "text", + "content": "Input Text: \"Generate the Pixar style of this picture.\"" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 94, + 440, + 197, + 509 + ], + "blocks": [ + { + "bbox": [ + 94, + 440, + 197, + 509 + ], + "lines": [ + { + "bbox": [ + 94, + 440, + 197, + 509 + ], + "spans": [ + { + "bbox": [ + 94, + 440, + 197, + 509 + ], + "type": "image", + "image_path": "f68ac7b3f9c3b4527403386e94ea70946346ec05ae2dde329bd666632c16d005.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 198, + 440, + 306, + 507 + ], + "blocks": [ + { + "bbox": [ + 198, + 440, + 306, + 507 + ], + "lines": [ + { + "bbox": [ + 198, + 440, + 306, + 507 + ], + "spans": [ + { + "bbox": [ + 198, + 440, + 306, + 507 + ], + "type": "image", + "image_path": "de2131f5c3c16b1cda2aa0dec3ae6fe0689fffc4267105db8583bef2e64b0cad.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 307, + 440, + 416, + 507 + ], + "blocks": [ + { + "bbox": [ + 307, + 440, + 416, + 507 + ], + "lines": [ + { + "bbox": [ + 307, + 440, + 416, + 507 + ], + "spans": [ + { + "bbox": [ + 307, + 440, + 416, + 507 + ], + "type": "image", + "image_path": "a85c872e242b8a76e3a8010c80983519f0fb6a8346e50773ef2e7fab0b390d2a.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 418, + 439, + 520, + 507 + ], + "blocks": [ + { + "bbox": [ + 418, + 439, + 520, + 507 + ], + "lines": [ + { + "bbox": [ + 418, + 439, + 520, + 507 + ], + "spans": [ + { + "bbox": [ + 418, + 439, + 520, + 507 + ], + "type": "image", + "image_path": "1f67f57b31e0864c333e12afce49cff3e3a9e54902f8fabbbf522d8e1b2bcb07.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 186, + 510, + 447, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 510, + 447, + 521 + ], + "spans": [ + { + "bbox": [ + 186, + 510, + 447, + 521 + ], + "type": "text", + "content": "Input Text: \"Generate the Cyberpunk style of this picture.\"" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 117, + 526, + 173, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 526, + 173, + 538 + ], + "spans": [ + { + "bbox": [ + 117, + 526, + 173, + 538 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 235, + 526, + 267, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 526, + 267, + 536 + ], + "spans": [ + { + "bbox": [ + 235, + 526, + 267, + 536 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 323, + 526, + 395, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 526, + 395, + 536 + ], + "spans": [ + { + "bbox": [ + 323, + 526, + 395, + 536 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 430, + 525, + 499, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 430, + 525, + 499, + 537 + ], + "spans": [ + { + "bbox": [ + 430, + 525, + 499, + 537 + ], + "type": "text", + "content": "Midjourney v6.1" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 136, + 76, + 227, + 87 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 76, + 227, + 87 + ], + "spans": [ + { + "bbox": [ + 136, + 76, + 227, + 87 + ], + "type": "text", + "content": "Prompted Stylization" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 244, + 87, + 386, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 87, + 386, + 98 + ], + "spans": [ + { + "bbox": [ + 244, + 87, + 386, + 98 + ], + "type": "text", + "content": "Evaluation: Consistency/style." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 130, + 99, + 212, + 195 + ], + "blocks": [ + { + "bbox": [ + 130, + 99, + 212, + 195 + ], + "lines": [ + { + "bbox": [ + 130, + 99, + 212, + 195 + ], + "spans": [ + { + "bbox": [ + 130, + 99, + 212, + 195 + ], + "type": "image", + "image_path": "7b58c3184e1b97ea204211d53437c27406bef28c23fb6a50586f6c0f1a5bfe24.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 231, + 99, + 299, + 194 + ], + "blocks": [ + { + "bbox": [ + 231, + 99, + 299, + 194 + ], + "lines": [ + { + "bbox": [ + 231, + 99, + 299, + 194 + ], + "spans": [ + { + "bbox": [ + 231, + 99, + 299, + 194 + ], + "type": "image", + "image_path": "4a7dfd47b3b4f45d4ea17721125d0fe632c1243e9f989c4ef96f7157ca1eac3f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 316, + 99, + 397, + 194 + ], + "blocks": [ + { + "bbox": [ + 316, + 99, + 397, + 194 + ], + "lines": [ + { + "bbox": [ + 316, + 99, + 397, + 194 + ], + "spans": [ + { + "bbox": [ + 316, + 99, + 397, + 194 + ], + "type": "image", + "image_path": "1a4e0134e2181e538cfa34d801a9a29589af46e4564af10bde2b0a48bdef3123.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 407, + 99, + 488, + 194 + ], + "blocks": [ + { + "bbox": [ + 407, + 99, + 488, + 194 + ], + "lines": [ + { + "bbox": [ + 407, + 99, + 488, + 194 + ], + "spans": [ + { + "bbox": [ + 407, + 99, + 488, + 194 + ], + "type": "image", + "image_path": "0e6d7e6671a48677bfae227082d4c8fb8b48df29d71b55960f2c402f7f628630.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 201, + 195, + 425, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 195, + 425, + 205 + ], + "spans": [ + { + "bbox": [ + 201, + 195, + 425, + 205 + ], + "type": "text", + "content": "Input Text: \"Generate the Simpsons style of this picture.\"" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 130, + 205, + 212, + 300 + ], + "blocks": [ + { + "bbox": [ + 130, + 205, + 212, + 300 + ], + "lines": [ + { + "bbox": [ + 130, + 205, + 212, + 300 + ], + "spans": [ + { + "bbox": [ + 130, + 205, + 212, + 300 + ], + "type": "image", + "image_path": "9e5e6d93ce20c260e565400b12fea89bd8bc529187af868dd712070ab1c328f8.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 231, + 205, + 298, + 300 + ], + "blocks": [ + { + "bbox": [ + 231, + 205, + 298, + 300 + ], + "lines": [ + { + "bbox": [ + 231, + 205, + 298, + 300 + ], + "spans": [ + { + "bbox": [ + 231, + 205, + 298, + 300 + ], + "type": "image", + "image_path": "8e5d8cc638f17208ad4fb443ff0683574d425e64d854400f82678e0227eca37e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 316, + 205, + 398, + 300 + ], + "blocks": [ + { + "bbox": [ + 316, + 205, + 398, + 300 + ], + "lines": [ + { + "bbox": [ + 316, + 205, + 398, + 300 + ], + "spans": [ + { + "bbox": [ + 316, + 205, + 398, + 300 + ], + "type": "image", + "image_path": "aa5d8bd6655c3924c3860521fa7d3aabfae95679b8638674f96b892057031bdf.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 
406, + 205, + 487, + 300 + ], + "blocks": [ + { + "bbox": [ + 406, + 205, + 487, + 300 + ], + "lines": [ + { + "bbox": [ + 406, + 205, + 487, + 300 + ], + "spans": [ + { + "bbox": [ + 406, + 205, + 487, + 300 + ], + "type": "image", + "image_path": "0459972536685fbb8896000173723ed46bcf8309ab5dd8cd3867ab99d79da23a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 204, + 302, + 421, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 204, + 302, + 421, + 312 + ], + "spans": [ + { + "bbox": [ + 204, + 302, + 421, + 312 + ], + "type": "text", + "content": "Input Text: \"Generate the Snoopy style of this picture.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 130, + 312, + 212, + 407 + ], + "blocks": [ + { + "bbox": [ + 130, + 312, + 212, + 407 + ], + "lines": [ + { + "bbox": [ + 130, + 312, + 212, + 407 + ], + "spans": [ + { + "bbox": [ + 130, + 312, + 212, + 407 + ], + "type": "image", + "image_path": "70346a54c2888c50ce4473234e1e5a51c19e9b3df375364d24bb2a98a5a7153a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 231, + 312, + 298, + 407 + ], + "blocks": [ + { + "bbox": [ + 231, + 312, + 298, + 407 + ], + "lines": [ + { + "bbox": [ + 231, + 312, + 298, + 407 + ], + "spans": [ + { + "bbox": [ + 231, + 312, + 298, + 407 + ], + "type": "image", + "image_path": "959c69e3cc69906dbb804b63c7b374843496df51ef6f1569b100b6937422b431.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 316, + 312, + 398, + 407 + ], + "blocks": [ + { + "bbox": [ + 316, + 312, + 398, + 407 + ], + "lines": [ + { + "bbox": [ + 316, + 312, + 398, + 407 + ], + "spans": [ + { + "bbox": [ + 316, + 312, + 398, + 407 + ], + "type": "image", + "image_path": "fc8ea4a8390ad6f9e57e8f5aab23790b3bd6070cec727042772246ae96706efb.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 406, + 312, + 489, + 407 + ], + "blocks": [ + { + "bbox": [ + 406, + 312, + 489, + 407 + ], + "lines": [ + { + "bbox": [ + 406, + 312, + 489, + 407 + ], + "spans": [ + { + "bbox": [ + 406, + 312, + 489, + 407 + ], + "type": "image", + "image_path": "4c59a95a5b0ba1c9ed0c407b8724156fd81dd1c21744e6e8075190bae52e10fd.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 204, + 408, + 418, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 204, + 408, + 418, + 418 + ], + "spans": [ + { + "bbox": [ + 204, + 408, + 418, + 418 + ], + "type": "text", + "content": "Input Text:\"Generate the Disney style of this picture.\"" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 130, + 418, + 212, + 513 + ], + "blocks": [ + { + "bbox": [ + 130, + 418, + 212, + 513 + ], + "lines": [ + { + "bbox": [ + 130, + 418, + 212, + 513 + ], + "spans": [ + { + "bbox": [ + 130, + 418, + 212, + 513 + ], + "type": "image", + "image_path": "4dbac1245c6ad15005db4a1d3ccb5720d30e6be34a344f45e008b6f608fc4cb6.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 231, + 418, + 298, + 512 + ], + "blocks": [ + { + "bbox": [ + 231, + 418, + 298, + 512 + ], + "lines": [ + { + "bbox": [ + 231, + 418, + 298, + 512 + ], + "spans": [ + { + 
"bbox": [ + 231, + 418, + 298, + 512 + ], + "type": "image", + "image_path": "c9580af54f8032d511fbb97fa1439c5960b355c6d89a6da9548e03d6da674129.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 316, + 418, + 398, + 512 + ], + "blocks": [ + { + "bbox": [ + 316, + 418, + 398, + 512 + ], + "lines": [ + { + "bbox": [ + 316, + 418, + 398, + 512 + ], + "spans": [ + { + "bbox": [ + 316, + 418, + 398, + 512 + ], + "type": "image", + "image_path": "a24d3f139726d7234b378db7110157186e07c2e355a0860dbbefa990e5431c62.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 406, + 418, + 488, + 512 + ], + "blocks": [ + { + "bbox": [ + 406, + 418, + 488, + 512 + ], + "lines": [ + { + "bbox": [ + 406, + 418, + 488, + 512 + ], + "spans": [ + { + "bbox": [ + 406, + 418, + 488, + 512 + ], + "type": "image", + "image_path": "1c79b6d712c77204bf93747fafc74f99e0ed9cd9216c159ddbe9f67834fa0ee5.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 206, + 514, + 416, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 514, + 416, + 523 + ], + "spans": [ + { + "bbox": [ + 206, + 514, + 416, + 523 + ], + "type": "text", + "content": "Input Text: \"Generate the Ghibli style of this picture.\"" + } + ] + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 130, + 523, + 212, + 618 + ], + "blocks": [ + { + "bbox": [ + 130, + 523, + 212, + 618 + ], + "lines": [ + { + "bbox": [ + 130, + 523, + 212, + 618 + ], + "spans": [ + { + "bbox": [ + 130, + 523, + 212, + 618 + ], + "type": "image", + "image_path": "899f06b2c3cf50b2d39323fff3222f827df1c1eb152fa1ed8c87460c8158613c.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 649, + 533, + 738 + ], + "lines": [ + { + "bbox": [ + 77, + 649, + 533, + 738 + ], + "spans": [ + { + "bbox": [ + 77, + 649, + 533, + 738 + ], + "type": "text", + "content": "Figure 13: Task: Style transfer, aiming to render an image in a specific artistic style while preserving the original content. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and Midjourney v6.1 [75] on human face style transfer across multiple artistic domains. Observations: GPT-4o exhibits significantly better content preservation compared to Gemini 2.0 Flash and Midjourney v6.1, maintaining fine-grained content details and structural consistency. In terms of style, it faithfully adheres to the textual description, effectively rendering vivid color palettes and soft contours that characterize the target style. This alignment notably surpasses both Gemini 2.0 Flash and Midjourney v6.1 far away, highlighting GPT-4o's strong capabilities in preserving content and faithfully rendering diverse styles." 
+ } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 232, + 523, + 298, + 617 + ], + "blocks": [ + { + "bbox": [ + 232, + 523, + 298, + 617 + ], + "lines": [ + { + "bbox": [ + 232, + 523, + 298, + 617 + ], + "spans": [ + { + "bbox": [ + 232, + 523, + 298, + 617 + ], + "type": "image", + "image_path": "eae0730d55066371b3326e9ed3ab31f01ca11d2267e31b1a8c17fc7663f2714f.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 316, + 523, + 398, + 617 + ], + "blocks": [ + { + "bbox": [ + 316, + 523, + 398, + 617 + ], + "lines": [ + { + "bbox": [ + 316, + 523, + 398, + 617 + ], + "spans": [ + { + "bbox": [ + 316, + 523, + 398, + 617 + ], + "type": "image", + "image_path": "d7a7f1fc31d3a9490433f88485c2a950c61d2d98019f2ef1f9666ccb7987ad48.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 406, + 524, + 489, + 617 + ], + "blocks": [ + { + "bbox": [ + 406, + 524, + 489, + 617 + ], + "lines": [ + { + "bbox": [ + 406, + 524, + 489, + 617 + ], + "spans": [ + { + "bbox": [ + 406, + 524, + 489, + 617 + ], + "type": "image", + "image_path": "c65cc8644013933cd8a0d822cb884ca3b9ab0b6f9e149296ab44724e475fa681.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "bbox": [ + 149, + 626, + 198, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 626, + 198, + 636 + ], + "spans": [ + { + "bbox": [ + 149, + 626, + 198, + 636 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 205, + 618, + 421, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 618, + 421, + 627 + ], + "spans": [ + { + "bbox": [ + 205, + 618, + 421, + 627 + ], + "type": "text", + "content": "Input Text: \"Generate the Cubism style of this picture.\"" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 250, + 628, + 278, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 628, + 278, + 636 + ], + "spans": [ + { + "bbox": [ + 250, + 628, + 278, + 636 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 325, + 628, + 387, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 628, + 387, + 636 + ], + "spans": [ + { + "bbox": [ + 325, + 628, + 387, + 636 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 420, + 627, + 481, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 420, + 627, + 481, + 637 + ], + "spans": [ + { + "bbox": [ + 420, + 627, + 481, + 637 + ], + "type": "text", + "content": "Midjourney v6.1" + } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 173, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 173, + 85 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 173, + 85 + ], + "type": "text", + "content": "2.2.2 
Image Editing" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 91, + 533, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 91, + 533, + 180 + ], + "spans": [ + { + "bbox": [ + 76, + 91, + 533, + 180 + ], + "type": "text", + "content": "Image editing involves modifying the visual elements, composition, or data of an image to achieve a desired outcome. This process can range from minor refinements to significant alterations, while maintaining the integrity of the original image. Over time, image editing techniques have evolved from manual, labor-intensive methods to sophisticated AI-driven approaches. Prior works [10, 30, 9, 120, 5, 29, 4, 40] have demonstrated the ability to perform various editing tasks based on textual instructions, such as adding, removing, or replacing objects; altering backgrounds, colors, or styles; and adjusting the number, size, or positions of objects. However, these models still exhibit limitations in certain scenarios, particularly in preserving non-edited regions, maintaining consistent image characteristics, and ensuring seamless blending between edited and non-edited areas." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 183, + 533, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 183, + 533, + 263 + ], + "spans": [ + { + "bbox": [ + 76, + 183, + 533, + 263 + ], + "type": "text", + "content": "We compare GPT-4o with MGIE [30], LEDs++ [9], MagicBrush [120], and Gemini 2.0 Flash [99], which are representative of current SOTA methods. These experiments evaluate GPT-4o's subject preservation and instruction-following capabilities to determine its effectiveness compared with existing methods. Comparative results are shown in Figure 14 through Figure 19. We find that GPT-4o achieves performance comparable to, and in many cases surpassing, SOTA baselines in image editing tasks. From these examples, GPT-4o exhibits the fewest failure cases, demonstrating a strong generalization ability across a wide variety of editing tasks. It consistently outperforms baseline models across multiple editing scenarios. We highlight several key observations:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 270, + 278, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 270, + 278, + 283 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 278, + 283 + ], + "type": "text", + "content": "Strengths of GPT-4o in image editing:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 123, + 285, + 531, + 478 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 123, + 285, + 531, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 285, + 531, + 319 + ], + "spans": [ + { + "bbox": [ + 123, + 285, + 531, + 319 + ], + "type": "text", + "content": "- Fine-grained editing: GPT-4o shows a superior ability to handle fine-grained editing tasks. For instance, in example 2 of Figure 14 and example 1 of Figure 15, GPT-4o successfully modified small, detailed objects such as a toothpick and pink ballerina slippers, outperforming prior methods." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 123, + 320, + 531, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 320, + 531, + 365 + ], + "spans": [ + { + "bbox": [ + 123, + 320, + 531, + 365 + ], + "type": "text", + "content": "- Substantial image transformations: GPT-4o excels at large-scale edits, such as background changes or object transformations, while maintaining visual coherence and realism. These complex edits require robust contextual and semantic understanding. Example 1 in Figure 16 illustrates GPT-4o's effective handling of a major background alteration task." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 123, + 365, + 531, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 365, + 531, + 411 + ], + "spans": [ + { + "bbox": [ + 123, + 365, + 531, + 411 + ], + "type": "text", + "content": "- Subject preservation: GPT-4o demonstrates strong subject-preserving capabilities, avoiding common artifacts such as facial distortions or component loss. In example 2 of Figure 14, GPT-4o retains the content of a drink that Gemini 2.0 Flash erroneously altered. Similarly, in example 5 of Figure 19, GPT-4o best preserves fuselage patterns and textual markings on an airplane." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 123, + 411, + 531, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 411, + 531, + 478 + ], + "spans": [ + { + "bbox": [ + 123, + 411, + 531, + 478 + ], + "type": "text", + "content": "- Instruction and original image adherence: GPT-4o shows a notable ability to follow instructions and maintain the structure of the original image, particularly in style editing and tasks involving object quantity, size, or position. This likely stems from its advanced understanding of both the image content and the editing instructions. For example, Figure 18 demonstrates GPT-4o's capability in style translation. Example 2 in Figure 17 shows its understanding of the term \"orange\" in both textual and visual contexts. A similar ability is illustrated in example 4 of Figure 19." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 480, + 286, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 480, + 286, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 286, + 493 + ], + "type": "text", + "content": "Limitations of GPT-4o in image editing:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 123, + 496, + 531, + 553 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 123, + 496, + 531, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 496, + 531, + 529 + ], + "spans": [ + { + "bbox": [ + 123, + 496, + 531, + 529 + ], + "type": "text", + "content": "- GPT-4o underperforms in scenarios where strict preservation of the original image's lighting, shading, and color tones is required. In such cases, the edited images may exhibit noticeable shifts in visual consistency. This is evident in examples 1 and 5 of Figure 14 and example 4 of Figure 15." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 123, + 530, + 531, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 530, + 531, + 553 + ], + "spans": [ + { + "bbox": [ + 123, + 530, + 531, + 553 + ], + "type": "text", + "content": "- In some cases, GPT-4o may fail to retain image details outside the intended edit region. 
For instance, example 4 in Figure 14 shows a degradation in image quality in non-targeted areas." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 77, + 563, + 533, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 563, + 533, + 619 + ], + "spans": [ + { + "bbox": [ + 77, + 563, + 533, + 619 + ], + "type": "text", + "content": "In summary, GPT-4o demonstrates substantial advancements in image editing, showing exceptional capabilities in detailed and large-scale edits, subject preservation, and adherence to instructions. While there are limitations in strictly maintaining original image characteristics such as lighting and tonal consistency, GPT-4o significantly reduces failure cases and outperforms existing baselines across a wide range of editing tasks, pushing the boundaries of current SOTA performance." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 203, + 106, + 219, + 119 + ], + "blocks": [ + { + "bbox": [ + 203, + 106, + 219, + 119 + ], + "lines": [ + { + "bbox": [ + 203, + 106, + 219, + 119 + ], + "spans": [ + { + "bbox": [ + 203, + 106, + 219, + 119 + ], + "type": "image", + "image_path": "cde0ac3d92eb933391cdf6879af267478293459d8efb075cb2c215e135eaf5de.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 217, + 106, + 406, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 106, + 406, + 120 + ], + "spans": [ + { + "bbox": [ + 217, + 106, + 406, + 120 + ], + "type": "text", + "content": "Evaluation: Instruction-following / faithful." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 118, + 126, + 206, + 213 + ], + "blocks": [ + { + "bbox": [ + 118, + 126, + 206, + 213 + ], + "lines": [ + { + "bbox": [ + 118, + 126, + 206, + 213 + ], + "spans": [ + { + "bbox": [ + 118, + 126, + 206, + 213 + ], + "type": "image", + "image_path": "413f16191d537cd336a6645867fb4044e2d27954d8e97a6f61443fdadd988968.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 215, + 286, + 224 + ], + "lines": [ + { + "bbox": [ + 119, + 215, + 286, + 224 + ], + "spans": [ + { + "bbox": [ + 119, + 215, + 286, + 224 + ], + "type": "text", + "content": "Input Text: \"Add a notebook to the desk.\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 214, + 125, + 302, + 214 + ], + "blocks": [ + { + "bbox": [ + 214, + 125, + 302, + 214 + ], + "lines": [ + { + "bbox": [ + 214, + 125, + 302, + 214 + ], + "spans": [ + { + "bbox": [ + 214, + 125, + 302, + 214 + ], + "type": "image", + "image_path": "e6edd2726b0a0b32b22ba7d28c24eb54d68706defe2d03d01109b73577922099.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 125, + 398, + 213 + ], + "blocks": [ + { + "bbox": [ + 310, + 125, + 398, + 213 + ], + "lines": [ + { + "bbox": [ + 310, + 125, + 398, + 213 + ], + "spans": [ + { + "bbox": [ + 310, + 125, + 398, + 213 + ], + "type": "image", + "image_path": "2b5cbcdbe86ff5d8016d28bf0a81a16d30075a3c50e64b84bfd2e7a4f9dbfb54.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 406, + 126, + 493, + 213 + ], + "blocks": [ + { + "bbox": [ + 406, + 126, + 493, + 213 + ], + "lines": [ + { + "bbox": [ + 406, + 126, + 493, + 213 + ], + "spans": [ + { + "bbox": [ + 406, + 126, + 493, + 213 + ], + "type": "image", + "image_path": "9b6616b3e3e491d5719d2c2596fedf5f8396a133bfd0edae1a8a64d343c4cd8c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 118, + 228, + 206, + 316 + ], + "blocks": [ + { + "bbox": [ + 118, + 228, + 206, + 316 + ], + "lines": [ + { + "bbox": [ + 118, + 228, + 206, + 316 + ], + "spans": [ + { + "bbox": [ + 118, + 228, + 206, + 316 + ], + "type": "image", + "image_path": "e1866d261c2cafcac4f4dbe2ed648662bbfe95d442c888ae66ee515ecf40b804.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 317, + 359, + 327 + ], + "lines": [ + { + "bbox": [ + 118, + 317, + 359, + 327 + ], + "spans": [ + { + "bbox": [ + 118, + 317, + 359, + 327 + ], + "type": "text", + "content": "Input Text: \"Put a toothpick in the top of the left sandwich.\"" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 214, + 228, + 301, + 315 + ], + "blocks": [ + { + "bbox": [ + 214, + 228, + 301, + 315 + ], + "lines": [ + { + "bbox": [ + 214, + 228, + 301, + 315 + ], + "spans": [ + { + "bbox": [ + 214, + 228, + 301, + 315 + ], + "type": "image", + "image_path": "b169d0c531726750bbaeeb2fde03b21e893573395b4260fafdf640d946589a90.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 228, + 397, + 316 + ], + "blocks": [ + { + "bbox": [ + 310, + 228, + 397, + 316 + ], + "lines": 
[ + { + "bbox": [ + 310, + 228, + 397, + 316 + ], + "spans": [ + { + "bbox": [ + 310, + 228, + 397, + 316 + ], + "type": "image", + "image_path": "f8056f09456f348f5dfebcc2e29caddc6eaffd73644a4b2e58c236fd4f647220.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 406, + 228, + 493, + 316 + ], + "blocks": [ + { + "bbox": [ + 406, + 228, + 493, + 316 + ], + "lines": [ + { + "bbox": [ + 406, + 228, + 493, + 316 + ], + "spans": [ + { + "bbox": [ + 406, + 228, + 493, + 316 + ], + "type": "image", + "image_path": "58100ddfbfd9cfbbcbe07188ce06d08a6990468de6ef91ce1596ca7e44f0513d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 118, + 331, + 205, + 416 + ], + "blocks": [ + { + "bbox": [ + 118, + 331, + 205, + 416 + ], + "lines": [ + { + "bbox": [ + 118, + 331, + 205, + 416 + ], + "spans": [ + { + "bbox": [ + 118, + 331, + 205, + 416 + ], + "type": "image", + "image_path": "ab999568dff4abd05c3fdb65bddbc6ea19103d123770203b58e1c0014cd42fd2.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 417, + 288, + 428 + ], + "lines": [ + { + "bbox": [ + 118, + 417, + 288, + 428 + ], + "spans": [ + { + "bbox": [ + 118, + 417, + 288, + 428 + ], + "type": "text", + "content": "Input Text: \"Change the goats into moose.\"" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 214, + 331, + 301, + 416 + ], + "blocks": [ + { + "bbox": [ + 214, + 331, + 301, + 416 + ], + "lines": [ + { + "bbox": [ + 214, + 331, + 301, + 416 + ], + "spans": [ + { + "bbox": [ + 214, + 331, + 301, + 416 + ], + "type": "image", + "image_path": "b33cf7d522604acf3a5bd5c343b02f71b892c5585b03a7601c2a93da774f7f45.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 310, + 331, + 397, + 417 + ], + "blocks": [ + { + "bbox": [ + 310, + 331, + 397, + 417 + ], + "lines": [ + { + "bbox": [ + 310, + 331, + 397, + 417 + ], + "spans": [ + { + "bbox": [ + 310, + 331, + 397, + 417 + ], + "type": "image", + "image_path": "0f95244911c8dc29653de6ffbe04b37fc4545e698fba41f0e9c612f8ef2eea1a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 405, + 331, + 492, + 417 + ], + "blocks": [ + { + "bbox": [ + 405, + 331, + 492, + 417 + ], + "lines": [ + { + "bbox": [ + 405, + 331, + 492, + 417 + ], + "spans": [ + { + "bbox": [ + 405, + 331, + 492, + 417 + ], + "type": "image", + "image_path": "c13377bc306094edb2168ae5ee5698264067e0fb2c892c3a361247e6688d7d37.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 118, + 432, + 205, + 520 + ], + "blocks": [ + { + "bbox": [ + 118, + 432, + 205, + 520 + ], + "lines": [ + { + "bbox": [ + 118, + 432, + 205, + 520 + ], + "spans": [ + { + "bbox": [ + 118, + 432, + 205, + 520 + ], + "type": "image", + "image_path": "454c99016219342e261335a7e82a9c5e7a05daa487fd74aa7d33328c05c7b4b4.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 521, + 314, + 532 + ], + "lines": [ + { + "bbox": [ + 118, + 521, + 314, + 532 + ], + "spans": [ + { + "bbox": [ + 118, + 521, + 314, + 532 + ], + "type": "text", + "content": "Input Text: 
\"Replace potatoes with baked beans.\"" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 214, + 432, + 302, + 519 + ], + "blocks": [ + { + "bbox": [ + 214, + 432, + 302, + 519 + ], + "lines": [ + { + "bbox": [ + 214, + 432, + 302, + 519 + ], + "spans": [ + { + "bbox": [ + 214, + 432, + 302, + 519 + ], + "type": "image", + "image_path": "2f5b1c3c3a38c0ef1182e319f7bb487e17c8dec0cf207f9ddae29bd0055c93dc.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 310, + 432, + 397, + 519 + ], + "blocks": [ + { + "bbox": [ + 310, + 432, + 397, + 519 + ], + "lines": [ + { + "bbox": [ + 310, + 432, + 397, + 519 + ], + "spans": [ + { + "bbox": [ + 310, + 432, + 397, + 519 + ], + "type": "image", + "image_path": "33b1124e10743638cb062747979a788b7ede97dab752dac0e2e8cac1db1a2516.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 406, + 432, + 493, + 520 + ], + "blocks": [ + { + "bbox": [ + 406, + 432, + 493, + 520 + ], + "lines": [ + { + "bbox": [ + 406, + 432, + 493, + 520 + ], + "spans": [ + { + "bbox": [ + 406, + 432, + 493, + 520 + ], + "type": "image", + "image_path": "460d34550756a1743afe1027e9058e86963ba9586255d860b0f2b1c270b9280e.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 118, + 534, + 206, + 620 + ], + "blocks": [ + { + "bbox": [ + 118, + 534, + 206, + 620 + ], + "lines": [ + { + "bbox": [ + 118, + 534, + 206, + 620 + ], + "spans": [ + { + "bbox": [ + 118, + 534, + 206, + 620 + ], + "type": "image", + "image_path": "bc1073fb4ee6a5f6762fbba75598ee620fd0b2f89f1eb8f7b28e02c9a973045e.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 623, + 347, + 634 + ], + "lines": [ + { + "bbox": [ + 118, + 623, + 347, + 634 + ], + "spans": [ + { + "bbox": [ + 118, + 623, + 347, + 634 + ], + "type": "text", + "content": "Input Text: \"Change the fire hydrant to a parking meter.\"" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 139, + 640, + 190, + 651 + ], + "lines": [ + { + "bbox": [ + 139, + 640, + 190, + 651 + ], + "spans": [ + { + "bbox": [ + 139, + 640, + 190, + 651 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 665, + 533, + 721 + ], + "lines": [ + { + "bbox": [ + 77, + 665, + 533, + 721 + ], + "spans": [ + { + "bbox": [ + 77, + 665, + 533, + 721 + ], + "type": "text", + "content": "Figure 14: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: GPT-4o achieves higher success rates than MGIE (examples 2/5) but occasionally alters unintended elements (bread in example 4) or lighting/shading structures (example 5). This likely stems from stronger generalization capacity and creative adaptation focus in training, though reduced fidelity suggests insufficient constraints on structural details during fine-tuning." 
+ } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 214, + 534, + 302, + 620 + ], + "blocks": [ + { + "bbox": [ + 214, + 534, + 302, + 620 + ], + "lines": [ + { + "bbox": [ + 214, + 534, + 302, + 620 + ], + "spans": [ + { + "bbox": [ + 214, + 534, + 302, + 620 + ], + "type": "image", + "image_path": "448a4c7fafc6c14e57593c609ff7476f3fc55ea22624bc38b74099fe39fc507a.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 246, + 640, + 276, + 650 + ], + "lines": [ + { + "bbox": [ + 246, + 640, + 276, + 650 + ], + "spans": [ + { + "bbox": [ + 246, + 640, + 276, + 650 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 310, + 534, + 398, + 620 + ], + "blocks": [ + { + "bbox": [ + 310, + 534, + 398, + 620 + ], + "lines": [ + { + "bbox": [ + 310, + 534, + 398, + 620 + ], + "spans": [ + { + "bbox": [ + 310, + 534, + 398, + 620 + ], + "type": "image", + "image_path": "c5822478e8f8995312ffe08cd952a629abcc29449b18a827236ad66d1633b87.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 323, + 640, + 389, + 650 + ], + "lines": [ + { + "bbox": [ + 323, + 640, + 389, + 650 + ], + "spans": [ + { + "bbox": [ + 323, + 640, + 389, + 650 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 406, + 534, + 493, + 620 + ], + "blocks": [ + { + "bbox": [ + 406, + 534, + 493, + 620 + ], + "lines": [ + { + "bbox": [ + 406, + 534, + 493, + 620 + ], + "spans": [ + { + "bbox": [ + 406, + 534, + 493, + 620 + ], + "type": "image", + "image_path": "daa8c1bb0acfd6e425d089f419400308811f1212d5b44f851d12ef8b15bbe500.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 440, + 640, + 465, + 650 + ], + "lines": [ + { + "bbox": [ + 440, + 640, + 465, + 650 + ], + "spans": [ + { + "bbox": [ + 440, + 640, + 465, + 650 + ], + "type": "text", + "content": "MGIE" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 135, + 80, + 198, + 94 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 80, + 198, + 94 + ], + "spans": [ + { + "bbox": [ + 135, + 80, + 198, + 94 + ], + "type": "text", + "content": "Image Editing" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 134, + 79, + 197, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 79, + 197, + 92 + ], + "spans": [ + { + "bbox": [ + 134, + 79, + 197, + 92 + ], + "type": "text", + "content": "Image Editing" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 203, + 100, + 217, + 113 + ], + "blocks": [ + { + "bbox": [ + 203, + 100, + 217, + 113 + ], + "lines": [ + { + "bbox": [ + 203, + 100, + 217, + 113 + ], + "spans": [ + { + "bbox": [ + 203, + 100, + 217, + 113 + ], + "type": "image", + 
"image_path": "2929ca2301ebf3a6fed0fa5d79fbf4b686d60eeaac8dec6a4e0cf76ff49e665e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 217, + 102, + 405, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 102, + 405, + 115 + ], + "spans": [ + { + "bbox": [ + 217, + 102, + 405, + 115 + ], + "type": "text", + "content": "Evaluation: Instruction-following / faithful." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 119, + 118, + 205, + 205 + ], + "blocks": [ + { + "bbox": [ + 119, + 118, + 205, + 205 + ], + "lines": [ + { + "bbox": [ + 119, + 118, + 205, + 205 + ], + "spans": [ + { + "bbox": [ + 119, + 118, + 205, + 205 + ], + "type": "image", + "image_path": "93239b7986d7e146b41b57dc317de85153c1f5d166ec11db11dfd7fe2702e17c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 207, + 364, + 219 + ], + "lines": [ + { + "bbox": [ + 119, + 207, + 364, + 219 + ], + "spans": [ + { + "bbox": [ + 119, + 207, + 364, + 219 + ], + "type": "text", + "content": "Input Text: \"Turn everyone shoes into pink ballerina slippers.\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 214, + 118, + 302, + 205 + ], + "blocks": [ + { + "bbox": [ + 214, + 118, + 302, + 205 + ], + "lines": [ + { + "bbox": [ + 214, + 118, + 302, + 205 + ], + "spans": [ + { + "bbox": [ + 214, + 118, + 302, + 205 + ], + "type": "image", + "image_path": "ab95d11bd4370f4489f59f00bf95b0420cf3629bf46e660886023f3f96118d89.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 311, + 118, + 399, + 205 + ], + "blocks": [ + { + "bbox": [ + 311, + 118, + 399, + 205 + ], + "lines": [ + { + "bbox": [ + 311, + 118, + 399, + 205 + ], + "spans": [ + { + "bbox": [ + 311, + 118, + 399, + 205 + ], + "type": "image", + "image_path": "6cecd99cb28dc56f82b143d8d1bfc81a9ffb34558b21ff22a2629368564a9e5f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 407, + 118, + 494, + 205 + ], + "blocks": [ + { + "bbox": [ + 407, + 118, + 494, + 205 + ], + "lines": [ + { + "bbox": [ + 407, + 118, + 494, + 205 + ], + "spans": [ + { + "bbox": [ + 407, + 118, + 494, + 205 + ], + "type": "image", + "image_path": "c8841acf64bd37b3e001194394bdc34bdbf3ce6e360349fc571e6d6f84b0e03a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 118, + 220, + 206, + 306 + ], + "blocks": [ + { + "bbox": [ + 118, + 220, + 206, + 306 + ], + "lines": [ + { + "bbox": [ + 118, + 220, + 206, + 306 + ], + "spans": [ + { + "bbox": [ + 118, + 220, + 206, + 306 + ], + "type": "image", + "image_path": "5ef2471bae5d6ac28f80fd9606df44d177b953395bf0f4d7328ae08ca174b1ee.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 308, + 357, + 319 + ], + "lines": [ + { + "bbox": [ + 118, + 308, + 357, + 319 + ], + "spans": [ + { + "bbox": [ + 118, + 308, + 357, + 319 + ], + "type": "text", + "content": "Input Text: \"Remove the fence from in front of the horses.\"" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 214, + 220, + 303, + 305 + ], + "blocks": [ + { + "bbox": [ + 214, + 220, 
+ 303, + 305 + ], + "lines": [ + { + "bbox": [ + 214, + 220, + 303, + 305 + ], + "spans": [ + { + "bbox": [ + 214, + 220, + 303, + 305 + ], + "type": "image", + "image_path": "530cb7230b747fa4e841052b66f6154fe575e082392dc9ebee75fb61c0ee6728.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 220, + 398, + 306 + ], + "blocks": [ + { + "bbox": [ + 310, + 220, + 398, + 306 + ], + "lines": [ + { + "bbox": [ + 310, + 220, + 398, + 306 + ], + "spans": [ + { + "bbox": [ + 310, + 220, + 398, + 306 + ], + "type": "image", + "image_path": "1c82b96334f0b7ff5da3c6643db4451a39f618d24f04fa9a677945a841872526.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 406, + 220, + 494, + 306 + ], + "blocks": [ + { + "bbox": [ + 406, + 220, + 494, + 306 + ], + "lines": [ + { + "bbox": [ + 406, + 220, + 494, + 306 + ], + "spans": [ + { + "bbox": [ + 406, + 220, + 494, + 306 + ], + "type": "image", + "image_path": "70e41d785dd49e7ac3f98b75bebf61abc711768215fa3f8fc2105d70e0467d9d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 118, + 322, + 205, + 408 + ], + "blocks": [ + { + "bbox": [ + 118, + 322, + 205, + 408 + ], + "lines": [ + { + "bbox": [ + 118, + 322, + 205, + 408 + ], + "spans": [ + { + "bbox": [ + 118, + 322, + 205, + 408 + ], + "type": "image", + "image_path": "2884734e16a9fec622bef494287b9ffd3440adb4d2a9f14797db74ec8bd44225.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 408, + 336, + 419 + ], + "lines": [ + { + "bbox": [ + 119, + 408, + 336, + 419 + ], + "spans": [ + { + "bbox": [ + 119, + 408, + 336, + 419 + ], + "type": "text", + "content": "Input Text: \"Remove the baby elephant in the picture.\"" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 214, + 322, + 302, + 408 + ], + "blocks": [ + { + "bbox": [ + 214, + 322, + 302, + 408 + ], + "lines": [ + { + "bbox": [ + 214, + 322, + 302, + 408 + ], + "spans": [ + { + "bbox": [ + 214, + 322, + 302, + 408 + ], + "type": "image", + "image_path": "c4aa3522a28b388224935e752868d0c11d026b182a9b1476c099f85eecf8b27d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 311, + 321, + 400, + 407 + ], + "blocks": [ + { + "bbox": [ + 311, + 321, + 400, + 407 + ], + "lines": [ + { + "bbox": [ + 311, + 321, + 400, + 407 + ], + "spans": [ + { + "bbox": [ + 311, + 321, + 400, + 407 + ], + "type": "image", + "image_path": "6bd024f68b4e3ada55b3eb581d5cca2d29125289402827758186fcce06573946.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 407, + 321, + 494, + 407 + ], + "blocks": [ + { + "bbox": [ + 407, + 321, + 494, + 407 + ], + "lines": [ + { + "bbox": [ + 407, + 321, + 494, + 407 + ], + "spans": [ + { + "bbox": [ + 407, + 321, + 494, + 407 + ], + "type": "image", + "image_path": "2865dc07722a294ea85dfd9841947ab7e799e3d91a0f4a9092b0ff0c6fddd5ac.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 119, + 421, + 206, + 508 + ], + "blocks": [ + { + "bbox": [ + 119, + 421, + 206, + 508 + ], + "lines": [ + { + 
"bbox": [ + 119, + 421, + 206, + 508 + ], + "spans": [ + { + "bbox": [ + 119, + 421, + 206, + 508 + ], + "type": "image", + "image_path": "abf7f3e688bfa59a5ecc1db78f61491254ec021fedeea5c85187c6562809d61c.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 214, + 422, + 302, + 508 + ], + "blocks": [ + { + "bbox": [ + 214, + 422, + 302, + 508 + ], + "lines": [ + { + "bbox": [ + 214, + 422, + 302, + 508 + ], + "spans": [ + { + "bbox": [ + 214, + 422, + 302, + 508 + ], + "type": "image", + "image_path": "625aa6127eca041e5810dc4b46c00716834bba9bb7fb4e2613067d7135aba20f.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 312, + 422, + 400, + 508 + ], + "blocks": [ + { + "bbox": [ + 312, + 422, + 400, + 508 + ], + "lines": [ + { + "bbox": [ + 312, + 422, + 400, + 508 + ], + "spans": [ + { + "bbox": [ + 312, + 422, + 400, + 508 + ], + "type": "image", + "image_path": "fbfad470b3b72d3478e91210af29255466f94c98c96fb529b634e931bd2f9848.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 408, + 421, + 495, + 508 + ], + "blocks": [ + { + "bbox": [ + 408, + 421, + 495, + 508 + ], + "lines": [ + { + "bbox": [ + 408, + 421, + 495, + 508 + ], + "spans": [ + { + "bbox": [ + 408, + 421, + 495, + 508 + ], + "type": "image", + "image_path": "5447a1f82a340e52710bd0c108dac73285aab065976ccc90d3243a192c03666f.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 119, + 522, + 206, + 608 + ], + "blocks": [ + { + "bbox": [ + 119, + 510, + 334, + 521 + ], + "lines": [ + { + "bbox": [ + 119, + 510, + 334, + 521 + ], + "spans": [ + { + "bbox": [ + 119, + 510, + 334, + 521 + ], + "type": "text", + "content": "Input Text: \"Change the yellow hat into a cowboy hat.\"" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 119, + 522, + 206, + 608 + ], + "lines": [ + { + "bbox": [ + 119, + 522, + 206, + 608 + ], + "spans": [ + { + "bbox": [ + 119, + 522, + 206, + 608 + ], + "type": "image", + "image_path": "013887f45aed224843b4df7ea2aba71451637e932d1143fbfcacbd27271f65f3.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 609, + 336, + 620 + ], + "lines": [ + { + "bbox": [ + 119, + 609, + 336, + 620 + ], + "spans": [ + { + "bbox": [ + 119, + 609, + 336, + 620 + ], + "type": "text", + "content": "Input Text: \"Remove the people from the background\"." 
+ } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 136, + 624, + 187, + 635 + ], + "lines": [ + { + "bbox": [ + 136, + 624, + 187, + 635 + ], + "spans": [ + { + "bbox": [ + 136, + 624, + 187, + 635 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 217, + 522, + 304, + 608 + ], + "blocks": [ + { + "bbox": [ + 217, + 522, + 304, + 608 + ], + "lines": [ + { + "bbox": [ + 217, + 522, + 304, + 608 + ], + "spans": [ + { + "bbox": [ + 217, + 522, + 304, + 608 + ], + "type": "image", + "image_path": "8acbf5757555da90dab4610d0040e9baac69b0e9e18c478fba61f00f601266a2.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 243, + 624, + 273, + 634 + ], + "lines": [ + { + "bbox": [ + 243, + 624, + 273, + 634 + ], + "spans": [ + { + "bbox": [ + 243, + 624, + 273, + 634 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 312, + 522, + 400, + 608 + ], + "blocks": [ + { + "bbox": [ + 312, + 522, + 400, + 608 + ], + "lines": [ + { + "bbox": [ + 312, + 522, + 400, + 608 + ], + "spans": [ + { + "bbox": [ + 312, + 522, + 400, + 608 + ], + "type": "image", + "image_path": "ef4e183b7dfd1b0ebe989bb80028076acf9467e603aef017798bb91bb385489e.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 624, + 386, + 634 + ], + "lines": [ + { + "bbox": [ + 321, + 624, + 386, + 634 + ], + "spans": [ + { + "bbox": [ + 321, + 624, + 386, + 634 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 407, + 522, + 494, + 609 + ], + "blocks": [ + { + "bbox": [ + 407, + 522, + 494, + 609 + ], + "lines": [ + { + "bbox": [ + 407, + 522, + 494, + 609 + ], + "spans": [ + { + "bbox": [ + 407, + 522, + 494, + 609 + ], + "type": "image", + "image_path": "29d11d5b373c8c6684c115fc01b10b1a340f12dbedfc7dfb4b61051e6911c7fe.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 438, + 624, + 462, + 634 + ], + "lines": [ + { + "bbox": [ + 438, + 624, + 462, + 634 + ], + "spans": [ + { + "bbox": [ + 438, + 624, + 462, + 634 + ], + "type": "text", + "content": "MGIE" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 650, + 533, + 717 + ], + "lines": [ + { + "bbox": [ + 77, + 650, + 533, + 717 + ], + "spans": [ + { + "bbox": [ + 77, + 650, + 533, + 717 + ], + "type": "text", + "content": "Figure 15: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: From examples 1-3, GPT-4o shows higher success in fine detail edits and large-scale edits with occlusions. This likely stems from GPT-4o's stronger contextual understanding and ability to infer missing or obscured elements, enabling more precise localized edits and coherent large-scale modifications even with partial visibility. However, it sometimes erases non-target elements (e.g., the house in example 5) and significantly alters global lighting (example 4)." 
+ } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 135, + 79, + 198, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 79, + 198, + 92 + ], + "spans": [ + { + "bbox": [ + 135, + 79, + 198, + 92 + ], + "type": "text", + "content": "Image Editing" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 203, + 99, + 217, + 113 + ], + "blocks": [ + { + "bbox": [ + 203, + 99, + 217, + 113 + ], + "lines": [ + { + "bbox": [ + 203, + 99, + 217, + 113 + ], + "spans": [ + { + "bbox": [ + 203, + 99, + 217, + 113 + ], + "type": "image", + "image_path": "a5f0857371c912d73812959c5154aa5f588c9c53b0cac8d82d05e68ed4b24ba2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 217, + 102, + 406, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 102, + 406, + 115 + ], + "spans": [ + { + "bbox": [ + 217, + 102, + 406, + 115 + ], + "type": "text", + "content": "Evaluation: Instruction-following / faithful." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 120, + 120, + 207, + 206 + ], + "blocks": [ + { + "bbox": [ + 120, + 120, + 207, + 206 + ], + "lines": [ + { + "bbox": [ + 120, + 120, + 207, + 206 + ], + "spans": [ + { + "bbox": [ + 120, + 120, + 207, + 206 + ], + "type": "image", + "image_path": "7fae08c7d278ec0abfd7dfa99c03539fe6218331e03f2ae1578f75bab7ed9747.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 216, + 119, + 303, + 205 + ], + "blocks": [ + { + "bbox": [ + 216, + 119, + 303, + 205 + ], + "lines": [ + { + "bbox": [ + 216, + 119, + 303, + 205 + ], + "spans": [ + { + "bbox": [ + 216, + 119, + 303, + 205 + ], + "type": "image", + "image_path": "7b6fe8843d08c99ead6fe3a01df0d1b2466ad14914ede1de9b670e85d3294614.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 312, + 118, + 397, + 205 + ], + "blocks": [ + { + "bbox": [ + 312, + 118, + 397, + 205 + ], + "lines": [ + { + "bbox": [ + 312, + 118, + 397, + 205 + ], + "spans": [ + { + "bbox": [ + 312, + 118, + 397, + 205 + ], + "type": "image", + "image_path": "18ac5988622ec524574bc3cacfb9b7b32ff24f0458ab883762dd159f18758912.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 408, + 118, + 494, + 206 + ], + "blocks": [ + { + "bbox": [ + 408, + 118, + 494, + 206 + ], + "lines": [ + { + "bbox": [ + 408, + 118, + 494, + 206 + ], + "spans": [ + { + "bbox": [ + 408, + 118, + 494, + 206 + ], + "type": "image", + "image_path": "64f0d91151654ae4516f7d303ba48c4e01706725697a79b75bb9494acae439d4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 119, + 207, + 419, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 207, + 419, + 217 + ], + "spans": [ + { + "bbox": [ + 119, + 207, + 419, + 217 + ], + 
"type": "text", + "content": "Input Text: \"Change the background to the set of a nickelodeon game show.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 119, + 220, + 205, + 307 + ], + "blocks": [ + { + "bbox": [ + 119, + 220, + 205, + 307 + ], + "lines": [ + { + "bbox": [ + 119, + 220, + 205, + 307 + ], + "spans": [ + { + "bbox": [ + 119, + 220, + 205, + 307 + ], + "type": "image", + "image_path": "577dde2d7debd868c1455d80030c181d453b6b7c4530b75d54e42bb0bc034596.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 216, + 220, + 302, + 308 + ], + "blocks": [ + { + "bbox": [ + 216, + 220, + 302, + 308 + ], + "lines": [ + { + "bbox": [ + 216, + 220, + 302, + 308 + ], + "spans": [ + { + "bbox": [ + 216, + 220, + 302, + 308 + ], + "type": "image", + "image_path": "03db7b6af0e509243f783b8ce2d6a7d6f8244e6d1c3cca245f733119d0adc4d8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 220, + 398, + 308 + ], + "blocks": [ + { + "bbox": [ + 310, + 220, + 398, + 308 + ], + "lines": [ + { + "bbox": [ + 310, + 220, + 398, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 220, + 398, + 308 + ], + "type": "image", + "image_path": "526a547e833ad8f409b85457a4b260fbfa64463b2a497a191035f2bdb24ac6b7.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 405, + 220, + 492, + 308 + ], + "blocks": [ + { + "bbox": [ + 405, + 220, + 492, + 308 + ], + "lines": [ + { + "bbox": [ + 405, + 220, + 492, + 308 + ], + "spans": [ + { + "bbox": [ + 405, + 220, + 492, + 308 + ], + "type": "image", + "image_path": "777c4e2019957c2a3491ab6ec8e5dbcb0731c4d26a35844fe6edc39ec5266953.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 309, + 295, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 309, + 295, + 320 + ], + "spans": [ + { + "bbox": [ + 118, + 309, + 295, + 320 + ], + "type": "text", + "content": "Input Text: \"Have the dog prick up its ears.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 118, + 326, + 204, + 408 + ], + "blocks": [ + { + "bbox": [ + 118, + 326, + 204, + 408 + ], + "lines": [ + { + "bbox": [ + 118, + 326, + 204, + 408 + ], + "spans": [ + { + "bbox": [ + 118, + 326, + 204, + 408 + ], + "type": "image", + "image_path": "4717d46544cd52b4e89e284f9ced1f798ee955d4101bcead34498865073ada80.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 216, + 326, + 302, + 408 + ], + "blocks": [ + { + "bbox": [ + 216, + 326, + 302, + 408 + ], + "lines": [ + { + "bbox": [ + 216, + 326, + 302, + 408 + ], + "spans": [ + { + "bbox": [ + 216, + 326, + 302, + 408 + ], + "type": "image", + "image_path": "f8b04a681ef9c0a1c0fb445e1ea16e42cbc375d735002dad0a66db67be113463.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 311, + 325, + 398, + 409 + ], + "blocks": [ + { + "bbox": [ + 311, + 325, + 398, + 409 + ], + "lines": [ + { + "bbox": [ + 311, + 325, + 398, + 409 + ], + "spans": [ + { + "bbox": [ + 311, + 325, + 398, + 409 + ], + "type": "image", + "image_path": "67a212cd49f8bf5728997494e42d4d92f076bc6f5c1e66b1dd42cfdcaaf6779c.jpg" + } + ] + } + 
], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 406, + 325, + 493, + 410 + ], + "blocks": [ + { + "bbox": [ + 406, + 325, + 493, + 410 + ], + "lines": [ + { + "bbox": [ + 406, + 325, + 493, + 410 + ], + "spans": [ + { + "bbox": [ + 406, + 325, + 493, + 410 + ], + "type": "image", + "image_path": "8f600bc759b57a060077f8d020e12edc5a0d30fdd50d923a4ba69ef3ab7e3b8a.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 119, + 410, + 299, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 410, + 299, + 419 + ], + "spans": [ + { + "bbox": [ + 119, + 410, + 299, + 419 + ], + "type": "text", + "content": "Input Text: \"Have the elephant's tail raised.\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 119, + 422, + 205, + 510 + ], + "blocks": [ + { + "bbox": [ + 119, + 422, + 205, + 510 + ], + "lines": [ + { + "bbox": [ + 119, + 422, + 205, + 510 + ], + "spans": [ + { + "bbox": [ + 119, + 422, + 205, + 510 + ], + "type": "image", + "image_path": "3986b51693c8099322835c4b144aa50d43241eafeb2e294ac53df6b06556a42b.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 662, + 532, + 718 + ], + "lines": [ + { + "bbox": [ + 77, + 662, + 532, + 718 + ], + "spans": [ + { + "bbox": [ + 77, + 662, + 532, + 718 + ], + "type": "text", + "content": "Figure 16: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MGIE [30]. Observations: From Example 1, GPT-4o demonstrates superior performance in style editing, effectively interpreting style instructions and preserving global image structure—a capability lacking in baseline models (MGIE, Gemini 2.0 Flash, and MagicBrush, as will be shown later). This likely stems from its stronger cross-modal comprehension and structural awareness during training." 
+ } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 215, + 423, + 302, + 510 + ], + "blocks": [ + { + "bbox": [ + 215, + 423, + 302, + 510 + ], + "lines": [ + { + "bbox": [ + 215, + 423, + 302, + 510 + ], + "spans": [ + { + "bbox": [ + 215, + 423, + 302, + 510 + ], + "type": "image", + "image_path": "f91e7c322f8dc4a9e7e58df1c8b1d035d289d156992fbfb80f834d5f06e3e4d2.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 312, + 423, + 397, + 508 + ], + "blocks": [ + { + "bbox": [ + 312, + 423, + 397, + 508 + ], + "lines": [ + { + "bbox": [ + 312, + 423, + 397, + 508 + ], + "spans": [ + { + "bbox": [ + 312, + 423, + 397, + 508 + ], + "type": "image", + "image_path": "9ff188c0ba980c8e75a69ac4f116a1d53d641501e8a2976781fdf333d07f29ca.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 407, + 423, + 493, + 509 + ], + "blocks": [ + { + "bbox": [ + 407, + 423, + 493, + 509 + ], + "lines": [ + { + "bbox": [ + 407, + 423, + 493, + 509 + ], + "spans": [ + { + "bbox": [ + 407, + 423, + 493, + 509 + ], + "type": "image", + "image_path": "a179e4417d79d0b36fecc454a7c170f44da2165e686f30f95e1e62f5dacec7e9.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 120, + 512, + 331, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 512, + 331, + 523 + ], + "spans": [ + { + "bbox": [ + 120, + 512, + 331, + 523 + ], + "type": "text", + "content": "Input Text: \"Change the background to Vatican City.\"" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 119, + 526, + 207, + 611 + ], + "blocks": [ + { + "bbox": [ + 119, + 526, + 207, + 611 + ], + "lines": [ + { + "bbox": [ + 119, + 526, + 207, + 611 + ], + "spans": [ + { + "bbox": [ + 119, + 526, + 207, + 611 + ], + "type": "image", + "image_path": "15fc7c5a66f48467e502347e843669f0689718ebbba95832bb0c704a6633d0b7.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 217, + 525, + 303, + 611 + ], + "blocks": [ + { + "bbox": [ + 217, + 525, + 303, + 611 + ], + "lines": [ + { + "bbox": [ + 217, + 525, + 303, + 611 + ], + "spans": [ + { + "bbox": [ + 217, + 525, + 303, + 611 + ], + "type": "image", + "image_path": "285c8caa7cc67cd2c1b2e70ff31cb3ba4484255739d14c8d701b34ac2e79ec36.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 313, + 525, + 400, + 611 + ], + "blocks": [ + { + "bbox": [ + 313, + 525, + 400, + 611 + ], + "lines": [ + { + "bbox": [ + 313, + 525, + 400, + 611 + ], + "spans": [ + { + "bbox": [ + 313, + 525, + 400, + 611 + ], + "type": "image", + "image_path": "aff34b2fc3ec9bfe6b6d30adc9b2581aab5368e8efd4ea317e48bc4626796721.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 408, + 525, + 493, + 611 + ], + "blocks": [ + { + "bbox": [ + 408, + 525, + 493, + 611 + ], + "lines": [ + { + "bbox": [ + 408, + 525, + 493, + 611 + ], + "spans": [ + { + "bbox": [ + 408, + 525, + 493, + 611 + ], + "type": "image", + "image_path": "e70d73a385cda6554925808b97816d9c1e848d834e5052992231cc4954cfd336.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 26 + }, + { + "bbox": [ + 120, + 613, + 337, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 613, + 337, + 624 + ], + "spans": [ + { + "bbox": [ + 120, + 613, + 337, + 624 + ], + "type": "text", + "content": "Input Text: \"Change the background to Mount Rainier.\"" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 137, + 632, + 187, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 632, + 187, + 643 + ], + "spans": [ + { + "bbox": [ + 137, + 632, + 187, + 643 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 244, + 632, + 274, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 632, + 274, + 642 + ], + "spans": [ + { + "bbox": [ + 244, + 632, + 274, + 642 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 322, + 632, + 387, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 632, + 387, + 642 + ], + "spans": [ + { + "bbox": [ + 322, + 632, + 387, + 642 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 438, + 632, + 463, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 632, + 463, + 642 + ], + "spans": [ + { + "bbox": [ + 438, + 632, + 463, + 642 + ], + "type": "text", + "content": "MGIE" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 137, + 80, + 199, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 80, + 199, + 94 + ], + "spans": [ + { + "bbox": [ + 137, + 80, + 199, + 94 + ], + "type": "text", + "content": "Image Editing" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 205, + 101, + 407, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 101, + 407, + 117 + ], + "spans": [ + { + "bbox": [ + 205, + 101, + 407, + 117 + ], + "type": "text", + "content": "Evaluation: Instruction-following / faithful." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 120, + 121, + 207, + 207 + ], + "blocks": [ + { + "bbox": [ + 120, + 121, + 207, + 207 + ], + "lines": [ + { + "bbox": [ + 120, + 121, + 207, + 207 + ], + "spans": [ + { + "bbox": [ + 120, + 121, + 207, + 207 + ], + "type": "image", + "image_path": "0b872b88a7a28e69a1daae9c6423253e4db8e407800572b004fd616d814d9b24.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 121, + 209, + 324, + 219 + ], + "lines": [ + { + "bbox": [ + 121, + 209, + 324, + 219 + ], + "spans": [ + { + "bbox": [ + 121, + 209, + 324, + 219 + ], + "type": "text", + "content": "Input Text: \"Add a white hat to the woman's head.\"" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 215, + 121, + 302, + 207 + ], + "blocks": [ + { + "bbox": [ + 215, + 121, + 302, + 207 + ], + "lines": [ + { + "bbox": [ + 215, + 121, + 302, + 207 + ], + "spans": [ + { + "bbox": [ + 215, + 121, + 302, + 207 + ], + "type": "image", + "image_path": "2c9aaa16beceac6929cab3c491e5386e4870c1879821502bc939b57f21a74ac6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 312, + 121, + 397, + 207 + ], + "blocks": [ + { + "bbox": [ + 312, + 121, + 397, + 207 + ], + "lines": [ + { + "bbox": [ + 312, + 121, + 397, + 207 + ], + "spans": [ + { + "bbox": [ + 312, + 121, + 397, + 207 + ], + "type": "image", + "image_path": "63887670dc86e68d25f445742e627570bc0bce4be5ac6001afe0c309c31c6b0a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 405, + 121, + 492, + 208 + ], + "blocks": [ + { + "bbox": [ + 405, + 121, + 492, + 208 + ], + "lines": [ + { + "bbox": [ + 405, + 121, + 492, + 208 + ], + "spans": [ + { + "bbox": [ + 405, + 121, + 492, + 208 + ], + "type": "image", + "image_path": "f1c73606a27398749d1cdb7f461d7eb75573cba818d1bde958bce44ebacb6dbd.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 121, + 222, + 207, + 308 + ], + "blocks": [ + { + "bbox": [ + 121, + 222, + 207, + 308 + ], + "lines": [ + { + "bbox": [ + 121, + 222, + 207, + 308 + ], + "spans": [ + { + "bbox": [ + 121, + 222, + 207, + 308 + ], + "type": "image", + "image_path": "3fae030462e9df055665edfa8c3602ad144dba999e49b471e3dfd45d576c435a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 122, + 309, + 363, + 319 + ], + "lines": [ + { + "bbox": [ + 122, + 309, + 363, + 319 + ], + "spans": [ + { + "bbox": [ + 122, + 309, + 363, + 319 + ], + "type": "text", + "content": "Input Text: \"Delete the oranges from the shelf in the image.\"" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 217, + 222, + 303, + 308 + ], + "blocks": [ + { + "bbox": [ + 217, + 222, + 303, + 308 + ], + "lines": [ + { + "bbox": [ + 217, + 222, + 303, + 308 + ], + "spans": [ + { + "bbox": [ + 217, + 222, + 303, + 308 + ], + "type": "image", + "image_path": "bff611326012e121ed651fc6f65158c4598f63fd2b8ac3ecac83f8c3ed9bba15.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 313, + 222, + 399, + 308 + ], + "blocks": [ + { + "bbox": [ + 313, + 222, + 399, + 308 + ], + 
"lines": [ + { + "bbox": [ + 313, + 222, + 399, + 308 + ], + "spans": [ + { + "bbox": [ + 313, + 222, + 399, + 308 + ], + "type": "image", + "image_path": "40c5465d2843a6e6f41aaecc277b509f4346d85fd64000b3badbfe3d66eb079e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 407, + 222, + 493, + 308 + ], + "blocks": [ + { + "bbox": [ + 407, + 222, + 493, + 308 + ], + "lines": [ + { + "bbox": [ + 407, + 222, + 493, + 308 + ], + "spans": [ + { + "bbox": [ + 407, + 222, + 493, + 308 + ], + "type": "image", + "image_path": "784eb3154634368e8aff984cdd03ed74a47d81368a315b4c0bbe827a0051146d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 121, + 322, + 208, + 407 + ], + "blocks": [ + { + "bbox": [ + 121, + 322, + 208, + 407 + ], + "lines": [ + { + "bbox": [ + 121, + 322, + 208, + 407 + ], + "spans": [ + { + "bbox": [ + 121, + 322, + 208, + 407 + ], + "type": "image", + "image_path": "2893eb40cbf1268dab0c08335defc17fff61e8a62c9f7f88cbb40f2797ff174b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 121, + 407, + 392, + 418 + ], + "lines": [ + { + "bbox": [ + 121, + 407, + 392, + 418 + ], + "spans": [ + { + "bbox": [ + 121, + 407, + 392, + 418 + ], + "type": "text", + "content": "Input Text: \"Get rid of the water the elephants are walking through.\"" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 217, + 321, + 303, + 407 + ], + "blocks": [ + { + "bbox": [ + 217, + 321, + 303, + 407 + ], + "lines": [ + { + "bbox": [ + 217, + 321, + 303, + 407 + ], + "spans": [ + { + "bbox": [ + 217, + 321, + 303, + 407 + ], + "type": "image", + "image_path": "136b9b62bc4d4cdfd994c33d5283fe146823a2feb7d76000b30d2a22fbb628a3.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 312, + 321, + 398, + 407 + ], + "blocks": [ + { + "bbox": [ + 312, + 321, + 398, + 407 + ], + "lines": [ + { + "bbox": [ + 312, + 321, + 398, + 407 + ], + "spans": [ + { + "bbox": [ + 312, + 321, + 398, + 407 + ], + "type": "image", + "image_path": "48b98ec70c96ab029b0a162958ffc7baf258a979ab48209783328fd17cc0c608.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 406, + 321, + 492, + 407 + ], + "blocks": [ + { + "bbox": [ + 406, + 321, + 492, + 407 + ], + "lines": [ + { + "bbox": [ + 406, + 321, + 492, + 407 + ], + "spans": [ + { + "bbox": [ + 406, + 321, + 492, + 407 + ], + "type": "image", + "image_path": "0aa8c1c419160769a182de4039193ca181bf320312a187d89844da7d1bcd55dc.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 420, + 187, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 420, + 187, + 431 + ], + "spans": [ + { + "bbox": [ + 138, + 420, + 187, + 431 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 243, + 420, + 273, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 420, + 273, + 430 + ], + "spans": [ + { + "bbox": [ + 243, + 420, + 273, + 430 + ], + "type": "text", + "content": "GPT-40" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 321, + 420, + 385, + 430 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 420, + 385, + 430 + ], + "spans": [ + { + "bbox": [ + 321, + 420, + 385, + 430 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 427, + 420, + 468, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 427, + 420, + 468, + 430 + ], + "spans": [ + { + "bbox": [ + 427, + 420, + 468, + 430 + ], + "type": "text", + "content": "LEDS++" + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 120, + 436, + 206, + 520 + ], + "blocks": [ + { + "bbox": [ + 120, + 436, + 206, + 520 + ], + "lines": [ + { + "bbox": [ + 120, + 436, + 206, + 520 + ], + "spans": [ + { + "bbox": [ + 120, + 436, + 206, + 520 + ], + "type": "image", + "image_path": "d4b9f29b0ab478363d67fd9b4ba47dd4d96e7162137bd71d2ff0b6ad6599db77.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 216, + 436, + 301, + 521 + ], + "blocks": [ + { + "bbox": [ + 216, + 436, + 301, + 521 + ], + "lines": [ + { + "bbox": [ + 216, + 436, + 301, + 521 + ], + "spans": [ + { + "bbox": [ + 216, + 436, + 301, + 521 + ], + "type": "image", + "image_path": "1d87a3ecf1aa5cd61e4772d303263ffdce0edea148e97f37a796904330cd58ee.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 121, + 522, + 295, + 533 + ], + "lines": [ + { + "bbox": [ + 121, + 522, + 295, + 533 + ], + "spans": [ + { + "bbox": [ + 121, + 522, + 295, + 533 + ], + "type": "text", + "content": "Input Text: \"Show the seal raising its head.\"" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 312, + 436, + 397, + 521 + ], + "blocks": [ + { + "bbox": [ + 312, + 436, + 397, + 521 + ], + "lines": [ + { + "bbox": [ + 312, + 436, + 397, + 521 + ], + "spans": [ + { + "bbox": [ + 312, + 436, + 397, + 521 + ], + "type": "image", + "image_path": "69763d65a9a155bb94fbc23e0c07823b4fbfe54ae0150f8bf1838f34853a3bce.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 406, + 436, + 492, + 521 + ], + "blocks": [ + { + "bbox": [ + 406, + 436, + 492, + 521 + ], + "lines": [ + { + "bbox": [ + 406, + 436, + 492, + 521 + ], + "spans": [ + { + "bbox": [ + 406, + 436, + 492, + 521 + ], + "type": "image", + "image_path": "c55a85d62ceb01b432fd7559421cb62d05bd7e42574b82a7254f0ddc252a5fb1.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 120, + 536, + 205, + 621 + ], + "blocks": [ + { + "bbox": [ + 120, + 536, + 205, + 621 + ], + "lines": [ + { + "bbox": [ + 120, + 536, + 205, + 621 + ], + "spans": [ + { + "bbox": [ + 120, + 536, + 205, + 621 + ], + "type": "image", + "image_path": "c8b6d0e8d66c6539cd6cfb4284248840031b6937e228c7f897330acc92dadf26.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 216, + 536, + 301, + 622 + ], + "blocks": [ + { + "bbox": [ + 216, + 536, + 301, + 622 + ], + "lines": [ + { + "bbox": [ + 216, + 536, + 301, + 622 + ], + "spans": [ + { + "bbox": [ + 216, + 536, + 301, + 622 + ], + "type": "image", + "image_path": "f206c5532d96bfcc1cf723bce40c254819c6dcc8f7c3dab25bd20471924a9c5d.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + 
"bbox": [ + 121, + 623, + 304, + 633 + ], + "lines": [ + { + "bbox": [ + 121, + 623, + 304, + 633 + ], + "spans": [ + { + "bbox": [ + 121, + 623, + 304, + 633 + ], + "type": "text", + "content": "Input Text: \"Change the sky to stars at night.\"" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 662, + 533, + 718 + ], + "lines": [ + { + "bbox": [ + 77, + 662, + 533, + 718 + ], + "spans": [ + { + "bbox": [ + 77, + 662, + 533, + 718 + ], + "type": "text", + "content": "Figure 17: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/LEDITS++ [9]/MagicBrush [120]. Observations: From Examples 2 and 3, GPT-4o demonstrates stronger comprehension of instructions involving 'the oranges on the shelf' and 'the water the elephants are walking through', translating this understanding into more accurate edits. This suggests better grounding of textual prompts in visual context during generation." + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 312, + 536, + 397, + 621 + ], + "blocks": [ + { + "bbox": [ + 312, + 536, + 397, + 621 + ], + "lines": [ + { + "bbox": [ + 312, + 536, + 397, + 621 + ], + "spans": [ + { + "bbox": [ + 312, + 536, + 397, + 621 + ], + "type": "image", + "image_path": "f0852ac207fe5ce1fa4118aefca299ece81ad07b3b1dabb16edd46a65c5a542c.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 405, + 536, + 492, + 621 + ], + "blocks": [ + { + "bbox": [ + 405, + 536, + 492, + 621 + ], + "lines": [ + { + "bbox": [ + 405, + 536, + 492, + 621 + ], + "spans": [ + { + "bbox": [ + 405, + 536, + 492, + 621 + ], + "type": "image", + "image_path": "4f92cfa09792f7a936729795c4faf6f6742488baef7a2394db6b9a5dc3b73e04.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "bbox": [ + 139, + 635, + 189, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 635, + 189, + 646 + ], + "spans": [ + { + "bbox": [ + 139, + 635, + 189, + 646 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 244, + 635, + 275, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 635, + 275, + 644 + ], + "spans": [ + { + "bbox": [ + 244, + 635, + 275, + 644 + ], + "type": "text", + "content": "GPT-40" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 322, + 635, + 387, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 635, + 387, + 644 + ], + "spans": [ + { + "bbox": [ + 322, + 635, + 387, + 644 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 427, + 635, + 473, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 427, + 635, + 473, + 646 + ], + "spans": [ + { + "bbox": [ + 427, + 635, + 473, + 646 + ], + "type": "text", + "content": "MagicBrush" + } + ] + } + ], + "index": 34 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 
135, + 86, + 196, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 86, + 196, + 99 + ], + "spans": [ + { + "bbox": [ + 135, + 86, + 196, + 99 + ], + "type": "text", + "content": "Image Editing" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 203, + 107, + 217, + 121 + ], + "blocks": [ + { + "bbox": [ + 203, + 107, + 217, + 121 + ], + "lines": [ + { + "bbox": [ + 203, + 107, + 217, + 121 + ], + "spans": [ + { + "bbox": [ + 203, + 107, + 217, + 121 + ], + "type": "image", + "image_path": "8b47755995c00dd9ee49f071a5906d93f0c148923ccaa69fb15f0c73140fc71c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 217, + 111, + 406, + 124 + ], + "lines": [ + { + "bbox": [ + 217, + 111, + 406, + 124 + ], + "spans": [ + { + "bbox": [ + 217, + 111, + 406, + 124 + ], + "type": "text", + "content": "Evaluation: Instruction-following / faithful." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 118, + 129, + 203, + 215 + ], + "blocks": [ + { + "bbox": [ + 118, + 129, + 203, + 215 + ], + "lines": [ + { + "bbox": [ + 118, + 129, + 203, + 215 + ], + "spans": [ + { + "bbox": [ + 118, + 129, + 203, + 215 + ], + "type": "image", + "image_path": "f7744cbca6f65d6b7b76f3ed15020d3fc9b6f8ed98f636028e5ced70384c52bd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 217, + 403, + 228 + ], + "lines": [ + { + "bbox": [ + 118, + 217, + 403, + 228 + ], + "spans": [ + { + "bbox": [ + 118, + 217, + 403, + 228 + ], + "type": "text", + "content": "Input Text: \"Change the image to a 1950s Flintstones cartoon art style.\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 214, + 129, + 302, + 215 + ], + "blocks": [ + { + "bbox": [ + 214, + 129, + 302, + 215 + ], + "lines": [ + { + "bbox": [ + 214, + 129, + 302, + 215 + ], + "spans": [ + { + "bbox": [ + 214, + 129, + 302, + 215 + ], + "type": "image", + "image_path": "c667c273e0d72e65f57befa30d7eac7de39e3dd5500f8b8982bf964a2af0bfd0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 312, + 129, + 398, + 215 + ], + "blocks": [ + { + "bbox": [ + 312, + 129, + 398, + 215 + ], + "lines": [ + { + "bbox": [ + 312, + 129, + 398, + 215 + ], + "spans": [ + { + "bbox": [ + 312, + 129, + 398, + 215 + ], + "type": "image", + "image_path": "a52dd1eb6b418ed5ae687d895e95de05feea7dd01a9e70453d3ef0959530bc3a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 406, + 130, + 492, + 215 + ], + "blocks": [ + { + "bbox": [ + 406, + 130, + 492, + 215 + ], + "lines": [ + { + "bbox": [ + 406, + 130, + 492, + 215 + ], + "spans": [ + { + "bbox": [ + 406, + 130, + 492, + 215 + ], + "type": "image", + "image_path": "58dcd2fc462e78a9dc9f7d7d52dd5084aa6aafb0d48039e3f826d0102bca067b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 117, + 231, + 204, + 319 + ], + "blocks": [ + { + "bbox": [ + 117, + 231, + 204, + 319 + ], + "lines": [ + { + "bbox": [ + 117, + 231, + 204, + 319 + ], + "spans": [ + { + "bbox": [ + 117, + 231, + 204, + 319 + ], + "type": "image", + "image_path": 
"55dc8086726bc727505f262393f9f58520886c8655e9a928d6c1c6186955bbfd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 319, + 307, + 330 + ], + "lines": [ + { + "bbox": [ + 118, + 319, + 307, + 330 + ], + "spans": [ + { + "bbox": [ + 118, + 319, + 307, + 330 + ], + "type": "text", + "content": "Input Text: \"Change this into a cubist painting.\"" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 214, + 231, + 302, + 319 + ], + "blocks": [ + { + "bbox": [ + 214, + 231, + 302, + 319 + ], + "lines": [ + { + "bbox": [ + 214, + 231, + 302, + 319 + ], + "spans": [ + { + "bbox": [ + 214, + 231, + 302, + 319 + ], + "type": "image", + "image_path": "d6c8b55a284c54268e71caa1c9af85186c073992a08e7df944a4827df416126b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 231, + 397, + 318 + ], + "blocks": [ + { + "bbox": [ + 310, + 231, + 397, + 318 + ], + "lines": [ + { + "bbox": [ + 310, + 231, + 397, + 318 + ], + "spans": [ + { + "bbox": [ + 310, + 231, + 397, + 318 + ], + "type": "image", + "image_path": "208b4450c1e449eb5c1aec21ac1e69f0930c6ad52d64be0770fb28058b41a6de.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 407, + 231, + 494, + 318 + ], + "blocks": [ + { + "bbox": [ + 407, + 231, + 494, + 318 + ], + "lines": [ + { + "bbox": [ + 407, + 231, + 494, + 318 + ], + "spans": [ + { + "bbox": [ + 407, + 231, + 494, + 318 + ], + "type": "image", + "image_path": "c7a9ad7f6f46ee29f8dc6c06aea221cccb760335b906b633f9630413215ff700.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 118, + 335, + 205, + 421 + ], + "blocks": [ + { + "bbox": [ + 118, + 335, + 205, + 421 + ], + "lines": [ + { + "bbox": [ + 118, + 335, + 205, + 421 + ], + "spans": [ + { + "bbox": [ + 118, + 335, + 205, + 421 + ], + "type": "image", + "image_path": "28ce32f62ab276444f6b71d92bf45eedf3ce7200dfed382d7460f96631f73071.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 214, + 335, + 301, + 420 + ], + "blocks": [ + { + "bbox": [ + 214, + 335, + 301, + 420 + ], + "lines": [ + { + "bbox": [ + 214, + 335, + 301, + 420 + ], + "spans": [ + { + "bbox": [ + 214, + 335, + 301, + 420 + ], + "type": "image", + "image_path": "662a88ccc330476d2b5521ea30b676e65a0af1c7ce47b24527e5328cb5d23989.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 312, + 335, + 399, + 422 + ], + "blocks": [ + { + "bbox": [ + 312, + 335, + 399, + 422 + ], + "lines": [ + { + "bbox": [ + 312, + 335, + 399, + 422 + ], + "spans": [ + { + "bbox": [ + 312, + 335, + 399, + 422 + ], + "type": "image", + "image_path": "4f6fc9b2404193b922604fdb04aa7ba553fa2808adea7ae850000fdbcf1459d2.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 423, + 418, + 433 + ], + "lines": [ + { + "bbox": [ + 118, + 423, + 418, + 433 + ], + "spans": [ + { + "bbox": [ + 118, + 423, + 418, + 433 + ], + "type": "text", + "content": "Input Text: \"Make the image appear as if it's a woodblock print by Hokusai.\"" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } 
+ ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 407, + 334, + 494, + 422 + ], + "blocks": [ + { + "bbox": [ + 407, + 334, + 494, + 422 + ], + "lines": [ + { + "bbox": [ + 407, + 334, + 494, + 422 + ], + "spans": [ + { + "bbox": [ + 407, + 334, + 494, + 422 + ], + "type": "image", + "image_path": "3ae48117c4aba996216a7bc491ef96a7ef76812eb31cfce77a2b900f543504dc.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 118, + 436, + 205, + 517 + ], + "blocks": [ + { + "bbox": [ + 118, + 436, + 205, + 517 + ], + "lines": [ + { + "bbox": [ + 118, + 436, + 205, + 517 + ], + "spans": [ + { + "bbox": [ + 118, + 436, + 205, + 517 + ], + "type": "image", + "image_path": "e58eec2c9254e2da8414f358c4e5148a84d45f6a54957dab8cb6f162f5233635.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 519, + 362, + 529 + ], + "lines": [ + { + "bbox": [ + 118, + 519, + 362, + 529 + ], + "spans": [ + { + "bbox": [ + 118, + 519, + 362, + 529 + ], + "type": "text", + "content": "Input Text: \"Change the background to Fushimi Inari Taisha.\"" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 216, + 436, + 302, + 517 + ], + "blocks": [ + { + "bbox": [ + 216, + 436, + 302, + 517 + ], + "lines": [ + { + "bbox": [ + 216, + 436, + 302, + 517 + ], + "spans": [ + { + "bbox": [ + 216, + 436, + 302, + 517 + ], + "type": "image", + "image_path": "cca5f714390c8193eca135b0bd9a7e4da0d407fca7b6a93aba9022833c487d0b.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 312, + 436, + 399, + 517 + ], + "blocks": [ + { + "bbox": [ + 312, + 436, + 399, + 517 + ], + "lines": [ + { + "bbox": [ + 312, + 436, + 399, + 517 + ], + "spans": [ + { + "bbox": [ + 312, + 436, + 399, + 517 + ], + "type": "image", + "image_path": "742bbe6bc7ddca81a49887d3b626885d9ffe8e90e72e044edaaa388cf541ef05.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 408, + 437, + 493, + 517 + ], + "blocks": [ + { + "bbox": [ + 408, + 437, + 493, + 517 + ], + "lines": [ + { + "bbox": [ + 408, + 437, + 493, + 517 + ], + "spans": [ + { + "bbox": [ + 408, + 437, + 493, + 517 + ], + "type": "image", + "image_path": "2c704075d95d45fe4f0f7a7b678bfc45ee73af1834623f4fe6e02497499106c1.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 118, + 532, + 204, + 619 + ], + "blocks": [ + { + "bbox": [ + 118, + 532, + 204, + 619 + ], + "lines": [ + { + "bbox": [ + 118, + 532, + 204, + 619 + ], + "spans": [ + { + "bbox": [ + 118, + 532, + 204, + 619 + ], + "type": "image", + "image_path": "7397b99d3dbd4f1f9b6ee30e701e62c0f7e12919bfa41da478790936477b2c34.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 620, + 369, + 632 + ], + "lines": [ + { + "bbox": [ + 118, + 620, + 369, + 632 + ], + "spans": [ + { + "bbox": [ + 118, + 620, + 369, + 632 + ], + "type": "text", + "content": "Input Text: \"Make the image appear like a Rembrandt painting.\"" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 667, + 532, + 712 + ], + "lines": [ + { + "bbox": [ + 77, + 667, + 532, + 712 + ], + "spans": [ + { + "bbox": [ + 77, 
+ 667, + 532, + 712 + ], + "type": "text", + "content": "Figure 18: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MagicBrush [120]. Observations: This set of examples further demonstrates GPT-4o's robust capabilities in style editing and background modification, consistent with the findings previously presented in Figure 16." + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 216, + 533, + 301, + 618 + ], + "blocks": [ + { + "bbox": [ + 216, + 533, + 301, + 618 + ], + "lines": [ + { + "bbox": [ + 216, + 533, + 301, + 618 + ], + "spans": [ + { + "bbox": [ + 216, + 533, + 301, + 618 + ], + "type": "image", + "image_path": "f4667249d38b5a17f284e530b43ccc19dae4b4df74a383b000d041b25e9cc575.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 312, + 533, + 397, + 617 + ], + "blocks": [ + { + "bbox": [ + 312, + 533, + 397, + 617 + ], + "lines": [ + { + "bbox": [ + 312, + 533, + 397, + 617 + ], + "spans": [ + { + "bbox": [ + 312, + 533, + 397, + 617 + ], + "type": "image", + "image_path": "a0db75620e3e1969b865f1e6ae4a41bb07983621337b59c87edc39c85287a2dc.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 407, + 533, + 493, + 618 + ], + "blocks": [ + { + "bbox": [ + 407, + 533, + 493, + 618 + ], + "lines": [ + { + "bbox": [ + 407, + 533, + 493, + 618 + ], + "spans": [ + { + "bbox": [ + 407, + 533, + 493, + 618 + ], + "type": "image", + "image_path": "5f20c93306fe70d43acd29c720c491e8d5d808966bbaa918395e1a87b2755d64.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "bbox": [ + 135, + 638, + 186, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 638, + 186, + 649 + ], + "spans": [ + { + "bbox": [ + 135, + 638, + 186, + 649 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 242, + 638, + 272, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 638, + 272, + 647 + ], + "spans": [ + { + "bbox": [ + 242, + 638, + 272, + 647 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 321, + 638, + 386, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 638, + 386, + 647 + ], + "spans": [ + { + "bbox": [ + 321, + 638, + 386, + 647 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 426, + 638, + 473, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 426, + 638, + 473, + 648 + ], + "spans": [ + { + "bbox": [ + 426, + 638, + 473, + 648 + ], + "type": "text", + "content": "MagicBrush" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 134, + 85, + 196, + 98 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 85,
+ 196, + 98 + ], + "type": "text", + "content": "Image Editing" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 202, + 105, + 216, + 118 + ], + "blocks": [ + { + "bbox": [ + 202, + 105, + 216, + 118 + ], + "lines": [ + { + "bbox": [ + 202, + 105, + 216, + 118 + ], + "spans": [ + { + "bbox": [ + 202, + 105, + 216, + 118 + ], + "type": "image", + "image_path": "fd02f523b32582d773f60da26889163fcfeb85e2ad61c1cbbf337661f949aa5b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 216, + 106, + 405, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 106, + 405, + 119 + ], + "spans": [ + { + "bbox": [ + 216, + 106, + 405, + 119 + ], + "type": "text", + "content": "Evaluation: Instruction-following / faithful." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 116, + 127, + 203, + 213 + ], + "blocks": [ + { + "bbox": [ + 116, + 127, + 203, + 213 + ], + "lines": [ + { + "bbox": [ + 116, + 127, + 203, + 213 + ], + "spans": [ + { + "bbox": [ + 116, + 127, + 203, + 213 + ], + "type": "image", + "image_path": "cdc522f7841c2077f741a6f72a86ede9c15c561e6b84d17afd15ae44085b29cf.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 215, + 312, + 225 + ], + "lines": [ + { + "bbox": [ + 118, + 215, + 312, + 225 + ], + "spans": [ + { + "bbox": [ + 118, + 215, + 312, + 225 + ], + "type": "text", + "content": "Input Text: \"Make the image look like a cartoon.\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 212, + 127, + 299, + 213 + ], + "blocks": [ + { + "bbox": [ + 212, + 127, + 299, + 213 + ], + "lines": [ + { + "bbox": [ + 212, + 127, + 299, + 213 + ], + "spans": [ + { + "bbox": [ + 212, + 127, + 299, + 213 + ], + "type": "image", + "image_path": "d89d446208b65160e03ce4ced290520055aa6e2461f4822a704190db34fdc06c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 309, + 128, + 397, + 213 + ], + "blocks": [ + { + "bbox": [ + 309, + 128, + 397, + 213 + ], + "lines": [ + { + "bbox": [ + 309, + 128, + 397, + 213 + ], + "spans": [ + { + "bbox": [ + 309, + 128, + 397, + 213 + ], + "type": "image", + "image_path": "ae6de595371f09d4b568904e9ac0c1c094ae221ff86659caaecc10d0524ac9df.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 406, + 128, + 492, + 213 + ], + "blocks": [ + { + "bbox": [ + 406, + 128, + 492, + 213 + ], + "lines": [ + { + "bbox": [ + 406, + 128, + 492, + 213 + ], + "spans": [ + { + "bbox": [ + 406, + 128, + 492, + 213 + ], + "type": "image", + "image_path": "bb239bc12ad173112db53edc8c9ee898e6ae74bcd56f4a9d4d2f4e2aa118d2f3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 118, + 232, + 203, + 316 + ], + "blocks": [ + { + "bbox": [ + 118, + 232, + 203, + 316 + ], + "lines": [ + { + "bbox": [ + 118, + 232, + 203, + 316 + ], + "spans": [ + { + "bbox": [ + 118, + 232, + 203, + 316 + ], + "type": "image", + "image_path": "777ec73ae37e58c0b5c2111e81cad23a6a071d747be202d05b8e482979d29b98.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 317, + 394, + 327 + ], + "lines": [ + { + "bbox": [ + 118, + 317, + 394, + 327 + ], + 
"spans": [ + { + "bbox": [ + 118, + 317, + 394, + 327 + ], + "type": "text", + "content": "Input Text: \"Change the bike frame to be shiny metal instead of red.\"" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 216, + 232, + 300, + 316 + ], + "blocks": [ + { + "bbox": [ + 216, + 232, + 300, + 316 + ], + "lines": [ + { + "bbox": [ + 216, + 232, + 300, + 316 + ], + "spans": [ + { + "bbox": [ + 216, + 232, + 300, + 316 + ], + "type": "image", + "image_path": "e4187d7ed6a1b514ec5676db3d2003f937bfa937564597d410716eae507bb1ca.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 312, + 232, + 397, + 316 + ], + "blocks": [ + { + "bbox": [ + 312, + 232, + 397, + 316 + ], + "lines": [ + { + "bbox": [ + 312, + 232, + 397, + 316 + ], + "spans": [ + { + "bbox": [ + 312, + 232, + 397, + 316 + ], + "type": "image", + "image_path": "eb16bd445147255cf3980142ad981069b415244e78b0a5cc390ff3c9e7af61b2.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 406, + 232, + 492, + 317 + ], + "blocks": [ + { + "bbox": [ + 406, + 232, + 492, + 317 + ], + "lines": [ + { + "bbox": [ + 406, + 232, + 492, + 317 + ], + "spans": [ + { + "bbox": [ + 406, + 232, + 492, + 317 + ], + "type": "image", + "image_path": "7b45aa773deb1657966b4ed79bd2b961c599f2a4ba279796de04e8ddce076662.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 119, + 337, + 205, + 421 + ], + "blocks": [ + { + "bbox": [ + 119, + 337, + 205, + 421 + ], + "lines": [ + { + "bbox": [ + 119, + 337, + 205, + 421 + ], + "spans": [ + { + "bbox": [ + 119, + 337, + 205, + 421 + ], + "type": "image", + "image_path": "6a5555b889ead9b7c54a679f8554efb5478d55c2bbab8a2783cd24e9f0e28abb.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 422, + 340, + 433 + ], + "lines": [ + { + "bbox": [ + 119, + 422, + 340, + 433 + ], + "spans": [ + { + "bbox": [ + 119, + 422, + 340, + 433 + ], + "type": "text", + "content": "Input Text: \"Change the table color from blue to black.\"" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 218, + 337, + 303, + 421 + ], + "blocks": [ + { + "bbox": [ + 218, + 337, + 303, + 421 + ], + "lines": [ + { + "bbox": [ + 218, + 337, + 303, + 421 + ], + "spans": [ + { + "bbox": [ + 218, + 337, + 303, + 421 + ], + "type": "image", + "image_path": "c86c4b988057b5e5ca08b92e4c09d8111a097f908d27efcd8da7c2ac29c5583a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 313, + 337, + 398, + 421 + ], + "blocks": [ + { + "bbox": [ + 313, + 337, + 398, + 421 + ], + "lines": [ + { + "bbox": [ + 313, + 337, + 398, + 421 + ], + "spans": [ + { + "bbox": [ + 313, + 337, + 398, + 421 + ], + "type": "image", + "image_path": "fafbcde737a7a7fc8337bc987984a76a03a10ed89b37798dab11796bc6e03f52.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 408, + 337, + 493, + 421 + ], + "blocks": [ + { + "bbox": [ + 408, + 337, + 493, + 421 + ], + "lines": [ + { + "bbox": [ + 408, + 337, + 493, + 421 + ], + "spans": [ + { + "bbox": [ + 408, + 
337, + 493, + 421 + ], + "type": "image", + "image_path": "d02f5e582a519b9d44e58b3ed2a49efbf23380fc85233c22949e12e735fc0378.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 120, + 439, + 206, + 520 + ], + "blocks": [ + { + "bbox": [ + 120, + 439, + 206, + 520 + ], + "lines": [ + { + "bbox": [ + 120, + 439, + 206, + 520 + ], + "spans": [ + { + "bbox": [ + 120, + 439, + 206, + 520 + ], + "type": "image", + "image_path": "ee337d58007e700256aa61fb4d68f998f7a15c4a2662d8db4f8d622da1b1e1fb.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 521, + 328, + 533 + ], + "lines": [ + { + "bbox": [ + 119, + 521, + 328, + 533 + ], + "spans": [ + { + "bbox": [ + 119, + 521, + 328, + 533 + ], + "type": "text", + "content": "Input Text: \"Change the woman's hair to be all blue.\"" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 217, + 440, + 303, + 520 + ], + "blocks": [ + { + "bbox": [ + 217, + 440, + 303, + 520 + ], + "lines": [ + { + "bbox": [ + 217, + 440, + 303, + 520 + ], + "spans": [ + { + "bbox": [ + 217, + 440, + 303, + 520 + ], + "type": "image", + "image_path": "c9364ac74a276a68b6a4096cb5d76160b136e7879dc4af294489f3c2d5723738.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 314, + 440, + 400, + 520 + ], + "blocks": [ + { + "bbox": [ + 314, + 440, + 400, + 520 + ], + "lines": [ + { + "bbox": [ + 314, + 440, + 400, + 520 + ], + "spans": [ + { + "bbox": [ + 314, + 440, + 400, + 520 + ], + "type": "image", + "image_path": "91b32ed2b4041faded4ca926aa6a533ba22ec3c950274f75ac5ae3556ef420f0.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 408, + 440, + 495, + 520 + ], + "blocks": [ + { + "bbox": [ + 408, + 440, + 495, + 520 + ], + "lines": [ + { + "bbox": [ + 408, + 440, + 495, + 520 + ], + "spans": [ + { + "bbox": [ + 408, + 440, + 495, + 520 + ], + "type": "image", + "image_path": "50fb798f21796fa55105c09f2e99d39e009e8f0617af0664a4836dbe70e314d0.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 120, + 540, + 206, + 624 + ], + "blocks": [ + { + "bbox": [ + 120, + 540, + 206, + 624 + ], + "lines": [ + { + "bbox": [ + 120, + 540, + 206, + 624 + ], + "spans": [ + { + "bbox": [ + 120, + 540, + 206, + 624 + ], + "type": "image", + "image_path": "91219e5a27a8f4d0eede45256f0b86e5561a80dee1a5c603cab39235011f504d.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 639, + 188, + 651 + ], + "lines": [ + { + "bbox": [ + 138, + 639, + 188, + 651 + ], + "spans": [ + { + "bbox": [ + 138, + 639, + 188, + 651 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 217, + 540, + 304, + 625 + ], + "blocks": [ + { + "bbox": [ + 217, + 540, + 304, + 625 + ], + "lines": [ + { + "bbox": [ + 217, + 540, + 304, + 625 + ], + "spans": [ + { + "bbox": [ + 217, + 540, + 304, + 625 + ], + "type": "image", + "image_path": "7e6bb403295173dc6c676365988dcd5fd4a615b8d4e09058c0895add12fbd47c.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, 
+ { + "bbox": [ + 122, + 627, + 369, + 637 + ], + "lines": [ + { + "bbox": [ + 122, + 627, + 369, + 637 + ], + "spans": [ + { + "bbox": [ + 122, + 627, + 369, + 637 + ], + "type": "text", + "content": "Input Text: \"Make the color of the airplane be yellow instead.\"" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 244, + 639, + 274, + 649 + ], + "lines": [ + { + "bbox": [ + 244, + 639, + 274, + 649 + ], + "spans": [ + { + "bbox": [ + 244, + 639, + 274, + 649 + ], + "type": "text", + "content": "GPT-40" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 314, + 540, + 402, + 626 + ], + "blocks": [ + { + "bbox": [ + 314, + 540, + 402, + 626 + ], + "lines": [ + { + "bbox": [ + 314, + 540, + 402, + 626 + ], + "spans": [ + { + "bbox": [ + 314, + 540, + 402, + 626 + ], + "type": "image", + "image_path": "2c0a4cd420ef9d5e4634c2962449526f8adf2f9dc3c5b4ca6622d4b56c4d08ad.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 323, + 639, + 388, + 649 + ], + "lines": [ + { + "bbox": [ + 323, + 639, + 388, + 649 + ], + "spans": [ + { + "bbox": [ + 323, + 639, + 388, + 649 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 668, + 533, + 713 + ], + "lines": [ + { + "bbox": [ + 77, + 668, + 533, + 713 + ], + "spans": [ + { + "bbox": [ + 77, + 668, + 533, + 713 + ], + "type": "text", + "content": "Figure 19: Task: Image editing for modifying visual elements and composition. Setup: GPT-4o vs. Gemini 2.0 Flash [99]/MagicBrush [120]. Observations: Example 4 highlights GPT-4o's superior image understanding—accurately distinguishing between hair and a scarf (where MagicBrush fails) to execute the edit. In Example 5, its precise retention of the plane's logo and text further demonstrates robust object-preservation capabilities." 
+ } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 408, + 540, + 493, + 626 + ], + "blocks": [ + { + "bbox": [ + 408, + 540, + 493, + 626 + ], + "lines": [ + { + "bbox": [ + 408, + 540, + 493, + 626 + ], + "spans": [ + { + "bbox": [ + 408, + 540, + 493, + 626 + ], + "type": "image", + "image_path": "217541d90806a38fd995d3e22236061f85eb7bc63300a2acde2bd0f1910bb8aa.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 428, + 639, + 474, + 651 + ], + "lines": [ + { + "bbox": [ + 428, + 639, + 474, + 651 + ], + "spans": [ + { + "bbox": [ + 428, + 639, + 474, + 651 + ], + "type": "text", + "content": "MagicBrush" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 173, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 173, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 173, + 83 + ], + "type": "text", + "content": "2.2.3 Customization" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 91, + 533, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 91, + 533, + 168 + ], + "spans": [ + { + "bbox": [ + 76, + 91, + 533, + 168 + ], + "type": "text", + "content": "Customization, also known as subject-driven generation or personalization, aims to enable visual generative models to generate visual concepts from given reference images. Initial methods [31, 91] have achieved this by optimizing text embeddings or model weights. Subsequent approaches [50, 36, 46, 125, 94, 129] expanded on these approaches to handle multiple visual concepts. Customization plays a crucial role in making visual generative models more flexible and applicable across diverse domains. By empowering models to adapt to user-provided inputs, it ensures outputs are tailored to specific visual concepts. This is particularly significant in industries such as artistic creation and advertising, where individualization and creativity are paramount." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 173, + 533, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 173, + 533, + 294 + ], + "spans": [ + { + "bbox": [ + 76, + 173, + 533, + 294 + ], + "type": "text", + "content": "To evaluate the performance of GPT-4o in this challenging task, we collect reference images from previous relevant works [130, 103], and conduct qualitative comparisons as shown in Figure 20 and Figure 21. For single-concept customization, we compare GPT-4o with Gemini 2.0 Flash and DisEnvisioner [130]. The results demonstrate that GPT-4o not only faithfully reproduces the visual concept from the reference image but also accurately adheres to the given textual description. In this task, GPT-4o significantly outperforms Gemini 2.0 Flash and achieves performance on par with the SOTA customization method. However, the images generated by GPT-4o still exhibit some \"copy-paste\" artifacts, leaving room for further improvement in the future. 
For multi-concept customization, we compare GPT-4o with Gemini 2.0 Flash and MS-Diffusion [103]. In this task, GPT-4o can still achieve competitive results for customizing multiple visual concepts in different contexts. Unfortunately, it struggles with certain unique combinations (e.g., making a dog wear a human dress), which could be attributed to the lack of relevant customization training data." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 298, + 533, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 298, + 533, + 333 + ], + "spans": [ + { + "bbox": [ + 77, + 298, + 533, + 333 + ], + "type": "text", + "content": "Overall, GPT-4o demonstrates impressive performance in both single-concept and multi-concept customization tasks, showcasing strong concept fidelity and great text alignment. Despite some limitations, GPT-4o achieves remarkable results on par with SOTA customization methods and outperforms Gemini 2.0 Flash." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 106, + 177, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 106, + 177, + 131 + ], + "spans": [ + { + "bbox": [ + 107, + 106, + 177, + 131 + ], + "type": "text", + "content": "Customization (Single concept)" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 107, + 138, + 121, + 154 + ], + "blocks": [ + { + "bbox": [ + 107, + 138, + 121, + 154 + ], + "lines": [ + { + "bbox": [ + 107, + 138, + 121, + 154 + ], + "spans": [ + { + "bbox": [ + 107, + 138, + 121, + 154 + ], + "type": "image", + "image_path": "646e79ac9f1f81f041625ce775812c9530257492f90e5afea41349ce5cd6894a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 122, + 142, + 487, + 157 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 142, + 487, + 157 + ], + "spans": [ + { + "bbox": [ + 122, + 142, + 487, + 157 + ], + "type": "text", + "content": "Evaluation: Corresponding visual concepts of given reference images." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 92, + 161, + 195, + 263 + ], + "blocks": [ + { + "bbox": [ + 92, + 161, + 195, + 263 + ], + "lines": [ + { + "bbox": [ + 92, + 161, + 195, + 263 + ], + "spans": [ + { + "bbox": [ + 92, + 161, + 195, + 263 + ], + "type": "image", + "image_path": "7b9ec0dd32124c06ec282d499d80f72d58efd6c6b8339cfbdfe41eea456580d6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 197, + 161, + 299, + 263 + ], + "blocks": [ + { + "bbox": [ + 197, + 161, + 299, + 263 + ], + "lines": [ + { + "bbox": [ + 197, + 161, + 299, + 263 + ], + "spans": [ + { + "bbox": [ + 197, + 161, + 299, + 263 + ], + "type": "image", + "image_path": "29e4e012635440b4b8fd5440387c6bcf7b8f25f3e0e7c0ea56e381f52b1a1451.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 302, + 161, + 405, + 263 + ], + "blocks": [ + { + "bbox": [ + 302, + 161, + 405, + 263 + ], + "lines": [ + { + "bbox": [ + 302, + 161, + 405, + 263 + ], + "spans": [ + { + "bbox": [ + 302, + 161, + 405, + 263 + ], + "type": "image", + "image_path": "f596fda615ad13c09d3de7a59c67e2adf0d99aa3880879a7dfca06b8ddd317e8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 408, + 161, + 511, + 263 + ], + "blocks": [ + { + "bbox": [ + 408, + 161, + 511, + 263 + ], + "lines": [ + { + "bbox": [ + 408, + 161, + 511, + 263 + ], + "spans": [ + { + "bbox": [ + 408, + 161, + 511, + 263 + ], + "type": "image", + "image_path": "ed7f35bce8d09ae0a591d4b7e14edd5bbe5913c727af2763219ee8eccf546612.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 96, + 270, + 493, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 270, + 493, + 295 + ], + "spans": [ + { + "bbox": [ + 96, + 270, + 493, + 295 + ], + "type": "text", + "content": "Input Text: \"A dog on top of a purple rug in a forest, with reference to the attached image.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 92, + 300, + 195, + 404 + ], + "blocks": [ + { + "bbox": [ + 92, + 300, + 195, + 404 + ], + "lines": [ + { + "bbox": [ + 92, + 300, + 195, + 404 + ], + "spans": [ + { + "bbox": [ + 92, + 300, + 195, + 404 + ], + "type": "image", + "image_path": "c337d9db266ab1888bf435abcc2ea4dfede05853a0b6a6b5906b13f424c95dc6.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 197, + 300, + 300, + 403 + ], + "blocks": [ + { + "bbox": [ + 197, + 300, + 300, + 403 + ], + "lines": [ + { + "bbox": [ + 197, + 300, + 300, + 403 + ], + "spans": [ + { + "bbox": [ + 197, + 300, + 300, + 403 + ], + "type": "image", + "image_path": "be6c16bf461ad6ab94168d3e79ef58fabfe4403b526f0dc3a29bc696d3d3531e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 302, + 300, + 405, + 403 + ], + "blocks": [ + { + "bbox": [ + 302, + 300, + 405, + 403 + ], + "lines": [ + { + "bbox": [ + 302, + 300, + 405, + 403 + ], + "spans": [ + { + "bbox": [ + 302, + 300, + 405, + 403 + ], + "type": "image", + "image_path": "58f6525436f8706212ce32abd6aa2f4ba0d58a7cd53654d0f24fecf81b3fb9cb.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + 
}, + { + "type": "image", + "bbox": [ + 408, + 300, + 511, + 404 + ], + "blocks": [ + { + "bbox": [ + 408, + 300, + 511, + 404 + ], + "lines": [ + { + "bbox": [ + 408, + 300, + 511, + 404 + ], + "spans": [ + { + "bbox": [ + 408, + 300, + 511, + 404 + ], + "type": "image", + "image_path": "b05322a8b0f75be84fd68e7685029fe222a8456412836c464982ecfc2b379f16.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 96, + 411, + 466, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 411, + 466, + 424 + ], + "spans": [ + { + "bbox": [ + 96, + 411, + 466, + 424 + ], + "type": "text", + "content": "Input Text: \"A cat wearing a Santa hat, with reference to the attached image.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 93, + 432, + 195, + 534 + ], + "blocks": [ + { + "bbox": [ + 93, + 432, + 195, + 534 + ], + "lines": [ + { + "bbox": [ + 93, + 432, + 195, + 534 + ], + "spans": [ + { + "bbox": [ + 93, + 432, + 195, + 534 + ], + "type": "image", + "image_path": "2766a0e15a98adf29692d023a9e4c39fef5c3e5ec591c397ca4c3600d9eb3690.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 599, + 533, + 689 + ], + "lines": [ + { + "bbox": [ + 77, + 599, + 533, + 689 + ], + "spans": [ + { + "bbox": [ + 77, + 599, + 533, + 689 + ], + "type": "text", + "content": "Figure 20: Task: Single-concept customization. The goal is to generate images that faithfully reproduce a single visual concept from reference images while aligning with a given textual description. Setup: Reference images are collected from prior works [130], and results are compared across GPT-4o, Gemini 2.0 Flash [99], and DisEnvisioner [130]. Each row includes the input reference image, text prompt, and the corresponding outputs. Observations: GPT-4o demonstrates strong performance in faithfully reproducing the single visual concept with high fidelity while adhering closely to the given textual description. It consistently outperforms Gemini 2.0 Flash and achieves results comparable to the SOTA method DisEnvisioner. However, some generated images still exhibit minor \"copy-paste\" artifacts, indicating room for further improvement." 
+ } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 197, + 432, + 299, + 535 + ], + "blocks": [ + { + "bbox": [ + 197, + 432, + 299, + 535 + ], + "lines": [ + { + "bbox": [ + 197, + 432, + 299, + 535 + ], + "spans": [ + { + "bbox": [ + 197, + 432, + 299, + 535 + ], + "type": "image", + "image_path": "b6d6e46c7e1fd50306d7fb544b81d3bcbe88cbf2672995aac7590c083fadec72.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 302, + 432, + 405, + 535 + ], + "blocks": [ + { + "bbox": [ + 302, + 432, + 405, + 535 + ], + "lines": [ + { + "bbox": [ + 302, + 432, + 405, + 535 + ], + "spans": [ + { + "bbox": [ + 302, + 432, + 405, + 535 + ], + "type": "image", + "image_path": "decdf079eb5a7ceb8546733501c7687db436872547911348307fba42a262e86d.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 408, + 432, + 511, + 535 + ], + "blocks": [ + { + "bbox": [ + 408, + 432, + 511, + 535 + ], + "lines": [ + { + "bbox": [ + 408, + 432, + 511, + 535 + ], + "spans": [ + { + "bbox": [ + 408, + 432, + 511, + 535 + ], + "type": "image", + "image_path": "27b3efd0800340d8acae187e104fb4c863298773ee0e13233df9fd4a8ff2e810.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 96, + 537, + 490, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 537, + 490, + 563 + ], + "spans": [ + { + "bbox": [ + 96, + 537, + 490, + 563 + ], + "type": "text", + "content": "Input Text: \"A pair of glasses with a tree and autumn leaves in the background, with reference to the attached image.\"" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 569, + 174, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 569, + 174, + 582 + ], + "spans": [ + { + "bbox": [ + 113, + 569, + 174, + 582 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 230, + 569, + 265, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 569, + 265, + 581 + ], + "spans": [ + { + "bbox": [ + 230, + 569, + 265, + 581 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 569, + 392, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 569, + 392, + 581 + ], + "spans": [ + { + "bbox": [ + 316, + 569, + 392, + 581 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 428, + 569, + 492, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 428, + 569, + 492, + 580 + ], + "spans": [ + { + "bbox": [ + 428, + 569, + 492, + 580 + ], + "type": "text", + "content": "DisEnvisioner" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 96, + 81, + 187, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 81, + 187, + 105 + ], + "spans": [ + { + "bbox": [ + 96, +
81, + 187, + 105 + ], + "type": "text", + "content": "Customization (Multiple concepts)" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 112, + 110, + 125, + 125 + ], + "blocks": [ + { + "bbox": [ + 112, + 110, + 125, + 125 + ], + "lines": [ + { + "bbox": [ + 112, + 110, + 125, + 125 + ], + "spans": [ + { + "bbox": [ + 112, + 110, + 125, + 125 + ], + "type": "image", + "image_path": "9f7208b84dbf5365d061b28896f055f01299d12b30af1dee82093163431a17e5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 126, + 114, + 492, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 114, + 492, + 128 + ], + "spans": [ + { + "bbox": [ + 126, + 114, + 492, + 128 + ], + "type": "text", + "content": "Evaluation: Corresponding visual concepts of given reference images." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 93, + 133, + 176, + 217 + ], + "blocks": [ + { + "bbox": [ + 93, + 133, + 176, + 217 + ], + "lines": [ + { + "bbox": [ + 93, + 133, + 176, + 217 + ], + "spans": [ + { + "bbox": [ + 93, + 133, + 176, + 217 + ], + "type": "image", + "image_path": "2982b017c59027bd916adf4c71fec2ef4c7b20ea6fcb1b3e05ccead2d64e7e70.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 177, + 133, + 258, + 217 + ], + "blocks": [ + { + "bbox": [ + 177, + 133, + 258, + 217 + ], + "lines": [ + { + "bbox": [ + 177, + 133, + 258, + 217 + ], + "spans": [ + { + "bbox": [ + 177, + 133, + 258, + 217 + ], + "type": "image", + "image_path": "a43b10404face144826ade37cce23e7383b09b5f6616cd1ba4fb27d65847035f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 258, + 133, + 340, + 217 + ], + "blocks": [ + { + "bbox": [ + 258, + 133, + 340, + 217 + ], + "lines": [ + { + "bbox": [ + 258, + 133, + 340, + 217 + ], + "spans": [ + { + "bbox": [ + 258, + 133, + 340, + 217 + ], + "type": "image", + "image_path": "08504d0ac60ae0b5850f6667bc744a4e3d743395a161f0de6a775051bb6f7278.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 340, + 133, + 423, + 217 + ], + "blocks": [ + { + "bbox": [ + 340, + 133, + 423, + 217 + ], + "lines": [ + { + "bbox": [ + 340, + 133, + 423, + 217 + ], + "spans": [ + { + "bbox": [ + 340, + 133, + 423, + 217 + ], + "type": "image", + "image_path": "6217bbdb7e9c6d3d88bbced40c11ad8ad80d6cc7c0af8b951f80ab29474cc67e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 423, + 133, + 507, + 217 + ], + "blocks": [ + { + "bbox": [ + 423, + 133, + 507, + 217 + ], + "lines": [ + { + "bbox": [ + 423, + 133, + 507, + 217 + ], + "spans": [ + { + "bbox": [ + 423, + 133, + 507, + 217 + ], + "type": "image", + "image_path": "b359a631d7b7dee251d05aec14e46acaab4300aca8d16ae17240156e943e2d83.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 96, + 224, + 507, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 224, + 507, + 239 + ], + "spans": [ + { + "bbox": [ + 96, + 224, + 507, + 239 + ], + "type": "text", + "content": "Input Text: \"A dog wearing a dress in the snow, with reference to the attached images.\"" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + 
"bbox": [ + 93, + 245, + 175, + 327 + ], + "blocks": [ + { + "bbox": [ + 93, + 245, + 175, + 327 + ], + "lines": [ + { + "bbox": [ + 93, + 245, + 175, + 327 + ], + "spans": [ + { + "bbox": [ + 93, + 245, + 175, + 327 + ], + "type": "image", + "image_path": "7e7b185220c6a783809189a0317a2ebff5e610e53e31f229bc61cc66cb9833ff.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 175, + 245, + 257, + 327 + ], + "blocks": [ + { + "bbox": [ + 175, + 245, + 257, + 327 + ], + "lines": [ + { + "bbox": [ + 175, + 245, + 257, + 327 + ], + "spans": [ + { + "bbox": [ + 175, + 245, + 257, + 327 + ], + "type": "image", + "image_path": "cb7b5802882687334bb83b37842a711b7d6066ee9b162b7773203fa931b58dae.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 257, + 245, + 339, + 327 + ], + "blocks": [ + { + "bbox": [ + 257, + 245, + 339, + 327 + ], + "lines": [ + { + "bbox": [ + 257, + 245, + 339, + 327 + ], + "spans": [ + { + "bbox": [ + 257, + 245, + 339, + 327 + ], + "type": "image", + "image_path": "24acb3bb969c56cf9068bc75a47163bb4b2fd5cfb83a6ae51eec791707338fa8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 339, + 245, + 421, + 327 + ], + "blocks": [ + { + "bbox": [ + 339, + 245, + 421, + 327 + ], + "lines": [ + { + "bbox": [ + 339, + 245, + 421, + 327 + ], + "spans": [ + { + "bbox": [ + 339, + 245, + 421, + 327 + ], + "type": "image", + "image_path": "f93e5e0c608562728e8855f8158f8f8dfca8a94d4114bd194f042654bffe10c0.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 421, + 245, + 503, + 327 + ], + "blocks": [ + { + "bbox": [ + 421, + 245, + 503, + 327 + ], + "lines": [ + { + "bbox": [ + 421, + 245, + 503, + 327 + ], + "spans": [ + { + "bbox": [ + 421, + 245, + 503, + 327 + ], + "type": "image", + "image_path": "9e7f0f5aebdeb1b02b10a56ee4521570fb0755150225325cdd221a08f72aeae6.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 95, + 333, + 492, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 333, + 492, + 358 + ], + "spans": [ + { + "bbox": [ + 95, + 333, + 492, + 358 + ], + "type": "text", + "content": "Input Text: \"A flower with a barn in the background, with reference to the attached images.\"" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 93, + 364, + 175, + 445 + ], + "blocks": [ + { + "bbox": [ + 93, + 364, + 175, + 445 + ], + "lines": [ + { + "bbox": [ + 93, + 364, + 175, + 445 + ], + "spans": [ + { + "bbox": [ + 93, + 364, + 175, + 445 + ], + "type": "image", + "image_path": "aab60cd3ceb814eae695e39c17c9ede66cb9a44a4700601d9febe20289cdba35.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 636, + 533, + 715 + ], + "lines": [ + { + "bbox": [ + 77, + 636, + 533, + 715 + ], + "spans": [ + { + "bbox": [ + 77, + 636, + 533, + 715 + ], + "type": "text", + "content": "Figure 21: Task: Multi-concept customization. The goal is to generate images that effectively combine multiple visual concepts from reference images while aligning with a given textual description. 
Setup: Reference images are collected from prior works [103], and results are compared across GPT-4o, Gemini 2.0 Flash [99], and MS-Diffusion [103]. Each row includes the input reference images, text prompt, and the corresponding outputs. Observations: GPT-4o achieves competitive results in combining multiple visual concepts, showing strong fidelity to individual concepts and alignment with text prompts. However, its performance declines with unique or complex combinations. Despite this, GPT-4o outperforms Gemini 2.0 Flash and achieves results on par with SOTA methods." + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 175, + 364, + 257, + 445 + ], + "blocks": [ + { + "bbox": [ + 175, + 364, + 257, + 445 + ], + "lines": [ + { + "bbox": [ + 175, + 364, + 257, + 445 + ], + "spans": [ + { + "bbox": [ + 175, + 364, + 257, + 445 + ], + "type": "image", + "image_path": "b7a34b10991c1b9ad9cd6f27bad980e361343453a99acd05c8354db9560287b5.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 257, + 364, + 339, + 445 + ], + "blocks": [ + { + "bbox": [ + 257, + 364, + 339, + 445 + ], + "lines": [ + { + "bbox": [ + 257, + 364, + 339, + 445 + ], + "spans": [ + { + "bbox": [ + 257, + 364, + 339, + 445 + ], + "type": "image", + "image_path": "ab685c4d565c19e94d0af9de2b15ce72d5e4d59e9fb5279055cc29a626f1a509.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 339, + 364, + 421, + 445 + ], + "blocks": [ + { + "bbox": [ + 339, + 364, + 421, + 445 + ], + "lines": [ + { + "bbox": [ + 339, + 364, + 421, + 445 + ], + "spans": [ + { + "bbox": [ + 339, + 364, + 421, + 445 + ], + "type": "image", + "image_path": "e4a1fb76e31c6e6095d35539c83419338ff04dfd9cc1cb7d1e9c18ca4ecabfab.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 421, + 364, + 503, + 445 + ], + "blocks": [ + { + "bbox": [ + 421, + 364, + 503, + 445 + ], + "lines": [ + { + "bbox": [ + 421, + 364, + 503, + 445 + ], + "spans": [ + { + "bbox": [ + 421, + 364, + 503, + 445 + ], + "type": "image", + "image_path": "de9e123e1e1147256977150c0054e8e9a10a47b47074aa699979adc065b6358c.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 96, + 449, + 477, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 449, + 477, + 475 + ], + "spans": [ + { + "bbox": [ + 96, + 449, + 477, + 475 + ], + "type": "text", + "content": "Input Text: \"A backpack and a stuffed animal in the jungle, with reference to the attached images.\"" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 479, + 158, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 479, + 158, + 491 + ], + "spans": [ + { + "bbox": [ + 106, + 479, + 158, + 491 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 189, + 479, + 241, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 479, + 241, + 491 + ], + "spans": [ + { + "bbox": [ + 189, + 479, + 241, + 491 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 284, + 479, + 314, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 479, + 
314, + 490 + ], + "spans": [ + { + "bbox": [ + 284, + 479, + 314, + 490 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 347, + 479, + 413, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 479, + 413, + 490 + ], + "spans": [ + { + "bbox": [ + 347, + 479, + 413, + 490 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 432, + 479, + 487, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 432, + 479, + 487, + 490 + ], + "spans": [ + { + "bbox": [ + 432, + 479, + 487, + 490 + ], + "type": "text", + "content": "MS-Diffusion" + } + ] + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 93, + 502, + 159, + 572 + ], + "blocks": [ + { + "bbox": [ + 93, + 502, + 159, + 572 + ], + "lines": [ + { + "bbox": [ + 93, + 502, + 159, + 572 + ], + "spans": [ + { + "bbox": [ + 93, + 502, + 159, + 572 + ], + "type": "image", + "image_path": "d617caa5b4e6e92b1428018122e2e89713f4978830db0e21a7ee846b88ce3163.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 160, + 502, + 229, + 572 + ], + "blocks": [ + { + "bbox": [ + 160, + 502, + 229, + 572 + ], + "lines": [ + { + "bbox": [ + 160, + 502, + 229, + 572 + ], + "spans": [ + { + "bbox": [ + 160, + 502, + 229, + 572 + ], + "type": "image", + "image_path": "896b8444337aa0e1e16c6afd486c4b4f5296517582f2b9dbf6bc16114d53d2ab.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 230, + 502, + 299, + 572 + ], + "blocks": [ + { + "bbox": [ + 230, + 502, + 299, + 572 + ], + "lines": [ + { + "bbox": [ + 230, + 502, + 299, + 572 + ], + "spans": [ + { + "bbox": [ + 230, + 502, + 299, + 572 + ], + "type": "image", + "image_path": "8217fbf0fc579e87f7d27802daffe76c3b9b1cdc80d7434cf08598ef201ca65b.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 299, + 502, + 366, + 572 + ], + "blocks": [ + { + "bbox": [ + 299, + 502, + 366, + 572 + ], + "lines": [ + { + "bbox": [ + 299, + 502, + 366, + 572 + ], + "spans": [ + { + "bbox": [ + 299, + 502, + 366, + 572 + ], + "type": "image", + "image_path": "e3951f420a6bebec8d982ae86c6d63b89a8e9d80db44d4a5af3d22ff2a0dabf6.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 367, + 502, + 436, + 572 + ], + "blocks": [ + { + "bbox": [ + 367, + 502, + 436, + 572 + ], + "lines": [ + { + "bbox": [ + 367, + 502, + 436, + 572 + ], + "spans": [ + { + "bbox": [ + 367, + 502, + 436, + 572 + ], + "type": "image", + "image_path": "0a9ad99bf92f46485bb850e72e6f3103fbdbc9cd19afa549dae69229296ce436.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 436, + 502, + 505, + 572 + ], + "blocks": [ + { + "bbox": [ + 436, + 502, + 505, + 572 + ], + "lines": [ + { + "bbox": [ + 436, + 502, + 505, + 572 + ], + "spans": [ + { + "bbox": [ + 436, + 502, + 505, + 572 + ], + "type": "image", + "image_path": "1e13756c7b144754ea9efd5eae0b5683845af5444f13a90c830d2492dfbd9bbd.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "bbox": [ + 96, + 575, + 504, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [
+ 96, + 575, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 96, + 575, + 504, + 601 + ], + "type": "text", + "content": "Input Text: \"A lantern, a clock, and a backpack on a cobblestone street, with reference to the attached images.\"" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 97, + 609, + 149, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 609, + 149, + 621 + ], + "spans": [ + { + "bbox": [ + 97, + 609, + 149, + 621 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 167, + 609, + 218, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 609, + 218, + 621 + ], + "spans": [ + { + "bbox": [ + 167, + 609, + 218, + 621 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 236, + 610, + 287, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 610, + 287, + 621 + ], + "spans": [ + { + "bbox": [ + 236, + 610, + 287, + 621 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 315, + 610, + 345, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 610, + 345, + 620 + ], + "spans": [ + { + "bbox": [ + 315, + 610, + 345, + 620 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 365, + 610, + 432, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 610, + 432, + 620 + ], + "spans": [ + { + "bbox": [ + 365, + 610, + 432, + 620 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 441, + 610, + 497, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 441, + 610, + 497, + 620 + ], + "spans": [ + { + "bbox": [ + 441, + 610, + 497, + 620 + ], + "type": "text", + "content": "MS-Diffusion" + } + ] + } + ], + "index": 38 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 40 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 215, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 215, + 85 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 215, + 85 + ], + "type": "text", + "content": "2.2.4 Story Image Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 533, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 533, + 137 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 533, + 137 + ], + "type": "text", + "content": "Story image generation is the task of generating a coherent sequence of story images from an input text narrative. The conditions may also include the first story frame or character images. We choose Gemini 2.0 Flash [99], StoryDiffusion [38], SEED-Story [111], and DiffSensei [108] as baselines, due to their proven ability to generate coherent and expressive story images and their public availability. The results are shown in Figures 22 and 23."
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 140, + 533, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 140, + 533, + 272 + ], + "spans": [ + { + "bbox": [ + 77, + 140, + 533, + 272 + ], + "type": "text", + "content": "In the first example, GPT-4o and StoryDiffusion successfully generate a three-panel short story about a fisherman, whereas Gemini 2.0 Flash fails by producing a single panel that appears to combine the three story narratives. In the second example, the story narrative is longer, spanning 11 panels. To evaluate this scenario with GPT-4o, we instruct the model to generate story images sequentially—using the input image and all previously generated images along with the corresponding text prompts. As shown in the figure, GPT-4o is capable of generating a long story with consistency. In the final example, we examine a Japanese black-and-white manga style with multiple input character images. GPT-4o is able to generate coherent stories, though it exhibits minor errors in character consistency (notably with the depiction of the woman) and misalignment with the input narrative (the narrative requires 7 panels, but only 6 are generated). The baseline Gemini 2.0 Flash performs worse, failing to preserve character status and the correct number of panels, as it also produces only 6 panels. Conversely, the DiffSensei model demonstrates superior performance, likely due to its specialized design and training for Japanese black-and-white manga generation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 277, + 533, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 277, + 533, + 311 + ], + "spans": [ + { + "bbox": [ + 77, + 277, + 533, + 311 + ], + "type": "text", + "content": "In conclusion, while GPT-4o achieves comparable performance to current baselines in story image generation, it shows limitations in specific scenarios—such as Japanese black-and-white manga and precise character status preservation—when compared to methods specifically tailored for those tasks." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 114, + 221, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 114, + 221, + 126 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 221, + 126 + ], + "type": "text", + "content": "Story Image Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 219, + 134, + 392, + 150 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 134, + 392, + 150 + ], + "spans": [ + { + "bbox": [ + 219, + 134, + 392, + 150 + ], + "type": "text", + "content": "Evaluation: Subject Consistency." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 96, + 156, + 211, + 224 + ], + "blocks": [ + { + "bbox": [ + 96, + 156, + 211, + 224 + ], + "lines": [ + { + "bbox": [ + 96, + 156, + 211, + 224 + ], + "spans": [ + { + "bbox": [ + 96, + 156, + 211, + 224 + ], + "type": "image", + "image_path": "9f84d4d4f227df36ae2d8eb29101d274a1877c545a2468371a5088741bc3cd8f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 227, + 171, + 236 + ], + "lines": [ + { + "bbox": [ + 138, + 227, + 171, + 236 + ], + "spans": [ + { + "bbox": [ + 138, + 227, + 171, + 236 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 216, + 156, + 284, + 224 + ], + "blocks": [ + { + "bbox": [ + 216, + 156, + 284, + 224 + ], + "lines": [ + { + "bbox": [ + 216, + 156, + 284, + 224 + ], + "spans": [ + { + "bbox": [ + 216, + 156, + 284, + 224 + ], + "type": "image", + "image_path": "821b29da41b0b8024feaf0f1d198abc77e613c7dd6beb41244e3ee04340d25a7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 286, + 227, + 353, + 239 + ], + "lines": [ + { + "bbox": [ + 286, + 227, + 353, + 239 + ], + "spans": [ + { + "bbox": [ + 286, + 227, + 353, + 239 + ], + "type": "text", + "content": "StoryDiffusion" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 286, + 156, + 354, + 224 + ], + "blocks": [ + { + "bbox": [ + 286, + 156, + 354, + 224 + ], + "lines": [ + { + "bbox": [ + 286, + 156, + 354, + 224 + ], + "spans": [ + { + "bbox": [ + 286, + 156, + 354, + 224 + ], + "type": "image", + "image_path": "40ffe89df9c50ac3b8034fed7000e9df015d18b39c605d05fc2cb755261800bb.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 356, + 156, + 424, + 224 + ], + "blocks": [ + { + "bbox": [ + 356, + 156, + 424, + 224 + ], + "lines": [ + { + "bbox": [ + 356, + 156, + 424, + 224 + ], + "spans": [ + { + "bbox": [ + 356, + 156, + 424, + 224 + ], + "type": "image", + "image_path": "7c0c2c96243ce0e8cc18e9a4df91dbe8e7777df73105a2ceb18d177a514d9735.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 428, + 156, + 513, + 224 + ], + "blocks": [ + { + "bbox": [ + 428, + 156, + 513, + 224 + ], + "lines": [ + { + "bbox": [ + 428, + 156, + 513, + 224 + ], + "spans": [ + { + "bbox": [ + 428, + 156, + 513, + 224 + ], + "type": "image", + "image_path": "73cf810872486276d591d5946a9ef3dc5549af56b3140dbc1d46cd76cf11381f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 435, + 227, + 508, + 237 + ], + "lines": [ + { + "bbox": [ + 435, + 227, + 508, + 237 + ], + "spans": [ + { + "bbox": [ + 435, + 227, + 508, + 237 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 103, + 247, + 159, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 247, + 159, + 258 + ], + "spans": [ + { + "bbox": [ + 103, + 247, + 159, + 258 + ], + "type": "text", + "content": "Input Text:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 103, + 258, + 195, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 103, + 258, + 195, + 269 + ], + "spans": [ + { + "bbox": [ + 103, + 258, + 195, + 269 + ], + "type": "text", + "content": "\"Draw a story about:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 103, + 269, + 326, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 269, + 326, + 280 + ], + "spans": [ + { + "bbox": [ + 103, + 269, + 326, + 280 + ], + "type": "text", + "content": "An old fisherman in a cable-knit sweater and boots" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 103, + 281, + 242, + 314 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 103, + 281, + 214, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 281, + 214, + 292 + ], + "spans": [ + { + "bbox": [ + 103, + 281, + 214, + 292 + ], + "type": "text", + "content": "1. Laying out a picnic solo" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 103, + 292, + 212, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 292, + 212, + 303 + ], + "spans": [ + { + "bbox": [ + 103, + 292, + 212, + 303 + ], + "type": "text", + "content": "2. Rowing a boat at dawn" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 103, + 303, + 242, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 303, + 242, + 314 + ], + "spans": [ + { + "bbox": [ + 103, + 303, + 242, + 314 + ], + "type": "text", + "content": "3. Stargazing with a telescope\"." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 97, + 323, + 213, + 440 + ], + "blocks": [ + { + "bbox": [ + 97, + 323, + 213, + 440 + ], + "lines": [ + { + "bbox": [ + 97, + 323, + 213, + 440 + ], + "spans": [ + { + "bbox": [ + 97, + 323, + 213, + 440 + ], + "type": "image", + "image_path": "9aa5032b866731f07d0e37da2dd1017f2e4122d3413aa5ce54be7e8880b33931.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 442, + 183, + 454 + ], + "lines": [ + { + "bbox": [ + 127, + 442, + 183, + 454 + ], + "spans": [ + { + "bbox": [ + 127, + 442, + 183, + 454 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 221, + 323, + 271, + 369 + ], + "blocks": [ + { + "bbox": [ + 221, + 323, + 271, + 369 + ], + "lines": [ + { + "bbox": [ + 221, + 323, + 271, + 369 + ], + "spans": [ + { + "bbox": [ + 221, + 323, + 271, + 369 + ], + "type": "image", + "image_path": "9b0a74f7db3b0699ac94506c383fb22b1af1f2bd911d7fa4312ee9ca289612ef.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 221, + 369, + 277, + 403 + ], + "blocks": [ + { + "bbox": [ + 221, + 369, + 277, + 403 + ], + "lines": [ + { + "bbox": [ + 221, + 369, + 277, + 403 + ], + "spans": [ + { + "bbox": [ + 221, + 369, + 277, + 403 + ], + "type": "image", + "image_path": "ea5e48f5561ad6c4fe1193a647524607970cfe7cdc63cd079944cf82880cecfe.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 221, + 404, + 261, + 438 + ], + "blocks": [ + { + "bbox": [ + 221, + 404, + 261, + 438 + ], + "lines": [ + { + "bbox": [ + 221, + 404, + 261, + 438 + ], + "spans": [ + { + "bbox": [ + 221, + 404, + 261, + 438 + ], + "type": "image", + "image_path": 
"d68465b01fad9263615792827618d6bc3ea2ac4994279661c281ea4561a93647.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 272, + 323, + 321, + 369 + ], + "blocks": [ + { + "bbox": [ + 272, + 323, + 321, + 369 + ], + "lines": [ + { + "bbox": [ + 272, + 323, + 321, + 369 + ], + "spans": [ + { + "bbox": [ + 272, + 323, + 321, + 369 + ], + "type": "image", + "image_path": "48601f43a53bfc38e48cf7cca862cb1f50808dd99f5c4af851716147ddce8e55.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 272, + 369, + 335, + 403 + ], + "blocks": [ + { + "bbox": [ + 272, + 369, + 335, + 403 + ], + "lines": [ + { + "bbox": [ + 272, + 369, + 335, + 403 + ], + "spans": [ + { + "bbox": [ + 272, + 369, + 335, + 403 + ], + "type": "image", + "image_path": "c03c17cff30fad0446b3d264e031d30a0f68b4ac841df4e2880f2381e0399ca9.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 272, + 404, + 301, + 438 + ], + "blocks": [ + { + "bbox": [ + 272, + 404, + 301, + 438 + ], + "lines": [ + { + "bbox": [ + 272, + 404, + 301, + 438 + ], + "spans": [ + { + "bbox": [ + 272, + 404, + 301, + 438 + ], + "type": "image", + "image_path": "c8ad21ac9388fd5c3e7f05f8a3c2192c73560b7737abef6737af98782aedeab8.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 279, + 444, + 311, + 454 + ], + "lines": [ + { + "bbox": [ + 279, + 444, + 311, + 454 + ], + "spans": [ + { + "bbox": [ + 279, + 444, + 311, + 454 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 322, + 323, + 372, + 369 + ], + "blocks": [ + { + "bbox": [ + 322, + 323, + 372, + 369 + ], + "lines": [ + { + "bbox": [ + 322, + 323, + 372, + 369 + ], + "spans": [ + { + "bbox": [ + 322, + 323, + 372, + 369 + ], + "type": "image", + "image_path": "85f94fe9cc62a3b120dd03edaa6ef6f69b02976c1603bf7df955e05359a6cb30.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 322, + 369, + 372, + 403 + ], + "blocks": [ + { + "bbox": [ + 322, + 369, + 372, + 403 + ], + "lines": [ + { + "bbox": [ + 322, + 369, + 372, + 403 + ], + "spans": [ + { + "bbox": [ + 322, + 369, + 372, + 403 + ], + "type": "image", + "image_path": "1c6fcddb57e3da5cf397a2753308ce3877a635582a5355e1b66875d5fb6b8636.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 322, + 404, + 372, + 438 + ], + "blocks": [ + { + "bbox": [ + 322, + 404, + 372, + 438 + ], + "lines": [ + { + "bbox": [ + 322, + 404, + 372, + 438 + ], + "spans": [ + { + "bbox": [ + 322, + 404, + 372, + 438 + ], + "type": "image", + "image_path": "0419b8779eb79040a7d1330971acc7342fec2dd90c7626958522a6fb5f0fcf8e.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 382, + 326, + 411, + 357 + ], + "blocks": [ + { + "bbox": [ + 382, + 326, + 411, + 357 + ], + "lines": [ + { + "bbox": [ + 382, + 326, + 411, + 357 + ], + "spans": [ + { + "bbox": [ + 382, + 326, + 411, + 357 + ], + "type": "image", + "image_path": "505a6db0b4602aea76566f436b70be7da6453677d5c873c5267b35cc43b24a51.jpg" + } + ] + } + ], + 
"index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 382, + 357, + 411, + 381 + ], + "blocks": [ + { + "bbox": [ + 382, + 357, + 411, + 381 + ], + "lines": [ + { + "bbox": [ + 382, + 357, + 411, + 381 + ], + "spans": [ + { + "bbox": [ + 382, + 357, + 411, + 381 + ], + "type": "image", + "image_path": "ccf71f9041dc2d2f4458a2e0113d7a09f60467f15834ca798f39accb6bbf34e8.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 382, + 381, + 411, + 402 + ], + "blocks": [ + { + "bbox": [ + 382, + 381, + 411, + 402 + ], + "lines": [ + { + "bbox": [ + 382, + 381, + 411, + 402 + ], + "spans": [ + { + "bbox": [ + 382, + 381, + 411, + 402 + ], + "type": "image", + "image_path": "bcd70602d56a2f46a9663b8b18aa7be1f8cc70645914af87662cc424f8bd98e1.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 382, + 402, + 411, + 439 + ], + "blocks": [ + { + "bbox": [ + 382, + 402, + 411, + 439 + ], + "lines": [ + { + "bbox": [ + 382, + 402, + 411, + 439 + ], + "spans": [ + { + "bbox": [ + 382, + 402, + 411, + 439 + ], + "type": "image", + "image_path": "a1117bdf2682b8d7115f12b7af60c126adce2d44e56036f3f9677ed0f6fa8cda.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 419, + 444, + 474, + 456 + ], + "lines": [ + { + "bbox": [ + 419, + 444, + 474, + 456 + ], + "spans": [ + { + "bbox": [ + 419, + 444, + 474, + 456 + ], + "type": "text", + "content": "SEED-Story" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 414, + 326, + 444, + 363 + ], + "blocks": [ + { + "bbox": [ + 414, + 326, + 444, + 363 + ], + "lines": [ + { + "bbox": [ + 414, + 326, + 444, + 363 + ], + "spans": [ + { + "bbox": [ + 414, + 326, + 444, + 363 + ], + "type": "image", + "image_path": "5daa456ff2e454b0f91f88203eed2a998e30a44e393325bd1cc55a49937d3939.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 414, + 364, + 445, + 402 + ], + "blocks": [ + { + "bbox": [ + 414, + 364, + 445, + 402 + ], + "lines": [ + { + "bbox": [ + 414, + 364, + 445, + 402 + ], + "spans": [ + { + "bbox": [ + 414, + 364, + 445, + 402 + ], + "type": "image", + "image_path": "073d5da23f5b7f75f55840c7e941a2744edbcb0a242ace828bcaf195e2c9978e.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 414, + 402, + 445, + 439 + ], + "blocks": [ + { + "bbox": [ + 414, + 402, + 445, + 439 + ], + "lines": [ + { + "bbox": [ + 414, + 402, + 445, + 439 + ], + "spans": [ + { + "bbox": [ + 414, + 402, + 445, + 439 + ], + "type": "image", + "image_path": "aa32d16435cd0e8550916c3f311f8776216cf8e0260b722c1418d0d7619083ed.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 448, + 326, + 479, + 363 + ], + "blocks": [ + { + "bbox": [ + 448, + 326, + 479, + 363 + ], + "lines": [ + { + "bbox": [ + 448, + 326, + 479, + 363 + ], + "spans": [ + { + "bbox": [ + 448, + 326, + 479, + 363 + ], + "type": "image", + "image_path": "a492c7a006b896e7436ce9df3705f82414a33538f91a0c7ba1f64667941a3dc1.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + 
"type": "image", + "bbox": [ + 448, + 364, + 479, + 402 + ], + "blocks": [ + { + "bbox": [ + 448, + 364, + 479, + 402 + ], + "lines": [ + { + "bbox": [ + 448, + 364, + 479, + 402 + ], + "spans": [ + { + "bbox": [ + 448, + 364, + 479, + 402 + ], + "type": "image", + "image_path": "0abc90b5e3bcbd0cb683654abfcc56966ce72e50c6e2cbbe3c61540b34a5b998.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 448, + 402, + 479, + 439 + ], + "blocks": [ + { + "bbox": [ + 448, + 402, + 479, + 439 + ], + "lines": [ + { + "bbox": [ + 448, + 402, + 479, + 439 + ], + "spans": [ + { + "bbox": [ + 448, + 402, + 479, + 439 + ], + "type": "image", + "image_path": "36ad8877a81602152fef02eb279de3452a2d3bcd29e3cca2508e6eb27da3dd21.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 482, + 326, + 512, + 363 + ], + "blocks": [ + { + "bbox": [ + 482, + 326, + 512, + 363 + ], + "lines": [ + { + "bbox": [ + 482, + 326, + 512, + 363 + ], + "spans": [ + { + "bbox": [ + 482, + 326, + 512, + 363 + ], + "type": "image", + "image_path": "e9288a0c02092fd53245823396ca1f76b2df5abca1ee697a871b5004f18dce58.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 482, + 364, + 512, + 402 + ], + "blocks": [ + { + "bbox": [ + 482, + 364, + 512, + 402 + ], + "lines": [ + { + "bbox": [ + 482, + 364, + 512, + 402 + ], + "spans": [ + { + "bbox": [ + 482, + 364, + 512, + 402 + ], + "type": "image", + "image_path": "aba75b8ad4c326892ca96755807604f259178d54af19a83236adab4115951198.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 482, + 402, + 512, + 439 + ], + "blocks": [ + { + "bbox": [ + 482, + 402, + 512, + 439 + ], + "lines": [ + { + "bbox": [ + 482, + 402, + 512, + 439 + ], + "spans": [ + { + "bbox": [ + 482, + 402, + 512, + 439 + ], + "type": "image", + "image_path": "b13f052399947f2c4da3a2882a7878f31fc79514f3fe8a809951a32ca1140118.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + }, + { + "bbox": [ + 103, + 462, + 159, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 462, + 159, + 473 + ], + "spans": [ + { + "bbox": [ + 103, + 462, + 159, + 473 + ], + "type": "text", + "content": "Input Text:" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 103, + 474, + 503, + 609 + ], + "type": "list", + "angle": 0, + "index": 56, + "blocks": [ + { + "bbox": [ + 103, + 474, + 273, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 474, + 273, + 485 + ], + "spans": [ + { + "bbox": [ + 103, + 474, + 273, + 485 + ], + "type": "text", + "content": "\"Draw a story about George, a monkey:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 103, + 485, + 482, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 485, + 482, + 496 + ], + "spans": [ + { + "bbox": [ + 103, + 485, + 482, + 496 + ], + "type": "text", + "content": "1. He looked around with a curious expression, wondering what adventures awaited him." 
+ } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 104, + 496, + 266, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 266, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 266, + 506 + ], + "type": "text", + "content": "2. Suddenly, George heard a noise. ..." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 104, + 506, + 405, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 405, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 405, + 518 + ], + "type": "text", + "content": "3. To his surprise, the noise was George's friend, a small brown dog ..." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 104, + 518, + 488, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 518, + 488, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 518, + 488, + 529 + ], + "type": "text", + "content": "4. George and the dog then played a game of hide and seek. George hid behind a couch ..." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 104, + 530, + 393, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 393, + 541 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 393, + 541 + ], + "type": "text", + "content": "5. The next day, George and the dog decided to explore the city ..." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 104, + 541, + 377, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 377, + 553 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 377, + 553 + ], + "type": "text", + "content": "6. George stopped on the city sidewalk, looking up at the sky ..." + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 104, + 553, + 356, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 553, + 356, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 553, + 356, + 563 + ], + "type": "text", + "content": "7. George then noticed a building with a reflective glass ..." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 104, + 563, + 467, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 467, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 467, + 574 + ], + "type": "text", + "content": "8. George and the dog stood in front of the building, looking up at the lit windows ..." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 104, + 574, + 419, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 419, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 419, + 586 + ], + "type": "text", + "content": "9. They were in a room with a door, waiting for their friend to join them" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 104, + 586, + 402, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 402, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 402, + 597 + ], + "type": "text", + "content": "10. Suddenly, the door opened, and a man in a yellow suit walked in ..." + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 104, + 597, + 503, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 503, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 503, + 609 + ], + "type": "text", + "content": "11. He seemed deep in thought, unaware of George and the dog watching him from below ...\"." 
+ } + ] + } + ], + "index": 55 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 77, + 628, + 533, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 628, + 533, + 684 + ], + "spans": [ + { + "bbox": [ + 77, + 628, + 533, + 684 + ], + "type": "text", + "content": "Figure 22: Task: Story image generation. The goal is to generate coherent story sequences based on narrative text, optionally conditioned on initial story frames or character images. Setup: Each example combines an input narrative (and, when available, reference character images) with a series of generated story panels. We compare outputs from GPT-4o against Gemini 2.0 Flash [99], StoryDiffusion [38], and SEED-Story [111]. Observations: GPT-4o exhibits strong narrative coherence and panel continuity, matching or surpassing general baselines." + } + ] + } + ], + "index": 57 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 58 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 103, + 175, + 220, + 189 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 175, + 220, + 189 + ], + "spans": [ + { + "bbox": [ + 103, + 175, + 220, + 189 + ], + "type": "text", + "content": "Story Image Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 233, + 199, + 391, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 199, + 391, + 213 + ], + "spans": [ + { + "bbox": [ + 233, + 199, + 391, + 213 + ], + "type": "text", + "content": "Evaluation: Subject Consistency." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 97, + 217, + 145, + 270 + ], + "blocks": [ + { + "bbox": [ + 97, + 217, + 145, + 270 + ], + "lines": [ + { + "bbox": [ + 97, + 217, + 145, + 270 + ], + "spans": [ + { + "bbox": [ + 97, + 217, + 145, + 270 + ], + "type": "image", + "image_path": "45c315f9d4b26f49ce32764e260ffb24ab385bf338d6917e2d37d6b26b242016.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 99, + 270, + 145, + 322 + ], + "blocks": [ + { + "bbox": [ + 99, + 270, + 145, + 322 + ], + "lines": [ + { + "bbox": [ + 99, + 270, + 145, + 322 + ], + "spans": [ + { + "bbox": [ + 99, + 270, + 145, + 322 + ], + "type": "image", + "image_path": "f4ad4ea5d8987493e7a7c6a58f347e56802ce312612d8ea2a144cda053cdf994.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 99, + 323, + 145, + 376 + ], + "blocks": [ + { + "bbox": [ + 99, + 323, + 145, + 376 + ], + "lines": [ + { + "bbox": [ + 99, + 323, + 145, + 376 + ], + "spans": [ + { + "bbox": [ + 99, + 323, + 145, + 376 + ], + "type": "image", + "image_path": "084f24a984613e1c05f7c5e2fa9817167a1fbac88e5cd473c3386ae1bd4b9322.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 91, + 380, + 151, + 392 + ], + "lines": [ + { + "bbox": [ + 91, + 380, + 151, + 392 + ], + "spans": [ + { + "bbox": [ + 91, + 380, + 151, + 392 + ], + "type": "text", + "content": "Input Images" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 155, + 219, + 257, + 267 + ], + "blocks": [ + { + "bbox": [ + 155, + 219, + 257, + 267 + ], + "lines": [ + { + "bbox": [ + 155, + 219, + 257, + 267 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 257, + 267 + ], + "type": "image", + "image_path": "536ebeb2091f4d9765dfff67b32c88a8b7e2f773ca113d0bf1fe677432f050ca.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 155, + 268, + 256, + 319 + ], + "blocks": [ + { + "bbox": [ + 155, + 268, + 256, + 319 + ], + "lines": [ + { + "bbox": [ + 155, + 268, + 256, + 319 + ], + "spans": [ + { + "bbox": [ + 155, + 268, + 256, + 319 + ], + "type": "image", + "image_path": "89edb9b677a275a49f6618e18a5f5744c28971461e4544202201e438f92139f3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 155, + 321, + 256, + 374 + ], + "blocks": [ + { + "bbox": [ + 155, + 321, + 256, + 374 + ], + "lines": [ + { + "bbox": [ + 155, + 321, + 256, + 374 + ], + "spans": [ + { + "bbox": [ + 155, + 321, + 256, + 374 + ], + "type": "image", + "image_path": "186181b2b63acba5d62a3720407b17286c90fad94c34b8ea5cef23a65cfbf1ec.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 189, + 380, + 222, + 389 + ], + "lines": [ + { + "bbox": [ + 189, + 380, + 222, + 389 + ], + "spans": [ + { + "bbox": [ + 189, + 380, + 222, + 389 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 261, + 217, + 372, + 269 + ], + "blocks": [ + { + "bbox": [ + 261, + 217, + 372, + 269 + ], + "lines": [ + { + "bbox": [ + 261, + 217, + 372, + 269 + ], + "spans": [ + { + "bbox": [ + 261, + 217, + 372, + 
269 + ], + "type": "image", + "image_path": "28bf76b0adb60b3e53902f81e3d33090021fb2e1147bd03fb08fac858f7ac972.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 261, + 270, + 371, + 325 + ], + "blocks": [ + { + "bbox": [ + 261, + 270, + 371, + 325 + ], + "lines": [ + { + "bbox": [ + 261, + 270, + 371, + 325 + ], + "spans": [ + { + "bbox": [ + 261, + 270, + 371, + 325 + ], + "type": "image", + "image_path": "9bba0dc289d8ec29ff37642b0bc4d84feafbf8e7b361e6e17186d7b033c6b5be.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 262, + 325, + 371, + 375 + ], + "blocks": [ + { + "bbox": [ + 262, + 325, + 371, + 375 + ], + "lines": [ + { + "bbox": [ + 262, + 325, + 371, + 375 + ], + "spans": [ + { + "bbox": [ + 262, + 325, + 371, + 375 + ], + "type": "image", + "image_path": "8ef5824e83d6803a07b614317f4d908c06c18331051e08b8eef6e454c1be8073.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 276, + 380, + 348, + 390 + ], + "lines": [ + { + "bbox": [ + 276, + 380, + 348, + 390 + ], + "spans": [ + { + "bbox": [ + 276, + 380, + 348, + 390 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 375, + 218, + 509, + 262 + ], + "blocks": [ + { + "bbox": [ + 375, + 218, + 509, + 262 + ], + "lines": [ + { + "bbox": [ + 375, + 218, + 509, + 262 + ], + "spans": [ + { + "bbox": [ + 375, + 218, + 509, + 262 + ], + "type": "image", + "image_path": "6043908cb4a51385380e40a2264f62f78e48ba2368469bbca5ad6f2dde9482cf.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 375, + 263, + 508, + 310 + ], + "blocks": [ + { + "bbox": [ + 375, + 263, + 508, + 310 + ], + "lines": [ + { + "bbox": [ + 375, + 263, + 508, + 310 + ], + "spans": [ + { + "bbox": [ + 375, + 263, + 508, + 310 + ], + "type": "image", + "image_path": "94c4c44bd6d4d882958c8eed91e70b3ca5d10dea1f870d92a7661befb223ea99.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 375, + 311, + 508, + 374 + ], + "blocks": [ + { + "bbox": [ + 375, + 311, + 508, + 374 + ], + "lines": [ + { + "bbox": [ + 375, + 311, + 508, + 374 + ], + "spans": [ + { + "bbox": [ + 375, + 311, + 508, + 374 + ], + "type": "image", + "image_path": "072676f5a96fdb59a2c5611f9262a740e1f39908971d09d9835789f46d395625.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 417, + 380, + 466, + 390 + ], + "lines": [ + { + "bbox": [ + 417, + 380, + 466, + 390 + ], + "spans": [ + { + "bbox": [ + 417, + 380, + 466, + 390 + ], + "type": "text", + "content": "DiffSensei" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 555, + 533, + 622 + ], + "lines": [ + { + "bbox": [ + 77, + 555, + 533, + 622 + ], + "spans": [ + { + "bbox": [ + 77, + 555, + 533, + 622 + ], + "type": "text", + "content": "Figure 23: Task: Story image generation. The goal is to generate coherent story sequences based on narrative text, optionally conditioned on initial story frames or character images. 
Setup: Each example combines an input narrative (and, when available, reference character images) with a series of generated story panels. We compare outputs from GPT-4o against baselines including Gemini 2.0 Flash [99] and DiffSensei [108]. Observations: GPT-4o shows minor shortcomings in precise character consistency and panel count in specialized contexts, such as Japanese black-and-white manga, where dedicated models like DiffSensei deliver superior performance." + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 97, + 406, + 152, + 416 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 406, + 152, + 416 + ], + "spans": [ + { + "bbox": [ + 97, + 406, + 152, + 416 + ], + "type": "text", + "content": "Input Text:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 97, + 418, + 509, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 418, + 509, + 439 + ], + "spans": [ + { + "bbox": [ + 97, + 418, + 509, + 439 + ], + "type": "text", + "content": "\"Please generate a black-and-white manga using the given characters (a young man, a child, and a woman). Each panel may contain 0-3 characters." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 97, + 440, + 511, + 529 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 97, + 440, + 454, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 440, + 454, + 451 + ], + "spans": [ + { + "bbox": [ + 97, + 440, + 454, + 451 + ], + "type": "text", + "content": "1. A man is lying on the floor surrounded by books and papers, with a radio nearby." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 97, + 451, + 511, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 451, + 511, + 473 + ], + "spans": [ + { + "bbox": [ + 97, + 451, + 511, + 473 + ], + "type": "text", + "content": "2. A woman with curly hair is smiling. She's wearing a patterned shirt and apron. She's holding a baby." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 97, + 474, + 483, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 474, + 483, + 484 + ], + "spans": [ + { + "bbox": [ + 97, + 474, + 483, + 484 + ], + "type": "text", + "content": "3. A man with a surprised expression, his mouth open as if he's about to shout or scream." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 97, + 485, + 415, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 485, + 415, + 496 + ], + "spans": [ + { + "bbox": [ + 97, + 485, + 415, + 496 + ], + "type": "text", + "content": "4. A young man with a surprised expression is holding a baby on his back." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 97, + 496, + 338, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 496, + 338, + 506 + ], + "spans": [ + { + "bbox": [ + 97, + 496, + 338, + 506 + ], + "type": "text", + "content": "5. A man is holding a baby. The man's hair is disheveled." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 97, + 507, + 416, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 507, + 416, + 517 + ], + "spans": [ + { + "bbox": [ + 97, + 507, + 416, + 517 + ], + "type": "text", + "content": "6. A man with a surprised expression. His eyes wide and eyebrows raised."
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 97, + 518, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 518, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 97, + 518, + 504, + 529 + ], + "type": "text", + "content": "7. A man carrying a child on his back walk up a staircase. The man is wearing a stripped shirt\"." + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 181, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 181, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 181, + 83 + ], + "type": "text", + "content": "2.2.5 Low-level Vision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 91, + 532, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 91, + 532, + 190 + ], + "spans": [ + { + "bbox": [ + 76, + 91, + 532, + 190 + ], + "type": "text", + "content": "Low-level vision tasks aim to enhance the basic quality or detail of visual content by improving various aspects of an image. Initial methods often focused on optimizing single tasks, such as super-resolution [88, 95], denoising [61, 63, 55], restoration [60, 20, 62, 84, 15, 16, 17], color adjustment [59], and more [22, 66, 116, 1, 122]. As the technology progressed, subsequent approaches expanded these techniques to handle multiple low-level tasks simultaneously, which is called universal image restoration. Low-level tasks play a critical role in image generation and editing, allowing visual generative models to provide higher-quality outputs in real-world applications. By enabling models to adapt to diverse inputs, they ensure that the generated images perform well across different visual tasks. This is especially important in areas such as image restoration and video enhancement, where high-precision visual content optimization is crucial, such as in film post-production and autonomous driving." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 194, + 531, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 194, + 531, + 294 + ], + "spans": [ + { + "bbox": [ + 76, + 194, + 531, + 294 + ], + "type": "text", + "content": "We evaluate the performance of GPT-4o in this challenging task. Firstly, for some image restoration tasks, such as super resolution, denoising, deraining, low-light enhancement, deblurring and dehazing. We collect reference images from previous relevant works Gemini 2.0 Flash and a universal image restoration model, InstructIR [20], as shown in Figures 24, 25, 26, 27, 28, 29, 33, 34. In most scenarios, GPT-4o guarantees high-quality output images, outperforming Gemini 2.0 Flash. However, there are still some degradation issues that are difficult to remove, as seen in the second image of the image denoising task. On the other hand, for low-level image restoration tasks, maintaining pixel consistency between the output and input images is crucial. GPT-4o does not perform well in this regard, as the content of many images changes. 
In contrast, InstructIR, designed specifically for image restoration, performs better, effectively removing degradation while maintaining pixel consistency throughout." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 298, + 531, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 298, + 531, + 397 + ], + "spans": [ + { + "bbox": [ + 76, + 298, + 531, + 397 + ], + "type": "text", + "content": "For image inpainting and outpainting in Figures 30 and 31, we compare GPT-4o with Gemini 2.0 Flash and the latest inpainting and outpainting methods [66, 116, 22, 1]. Only the missing regions need to be completed, yet GPT-4o still alters content that should remain untouched. Although its output image quality is higher, this is not ideal for evaluating the task itself. For human face inpainting, GPT-4o's overall artistic style is more natural than that of the other two methods. For colorization, we choose the latest colorization model, CtrlColor [59]. GPT-4o's results in Figure 32 have a somewhat dark overall style. Compared to Gemini 2.0 Flash, GPT-4o's colors are more natural and consistent with the intended style. However, there are some inaccuracies in color control. For example, in the second image, the cat is not white as specified in the text. Additionally, GPT-4o still exhibits issues with changes in image content, such as the shape of the person's face in the fourth image." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 402, + 531, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 402, + 531, + 490 + ], + "spans": [ + { + "bbox": [ + 76, + 402, + 531, + 490 + ], + "type": "text", + "content": "For the image re-lighting task in Figure 35, GPT-4o performs well in applying realistic lighting and shadows, with natural color tones that match the scene. However, it occasionally struggles to maintain light consistency, particularly in complex lighting scenarios such as neon or vibrant lights. Compared to Gemini 2.0 Flash, GPT-4o produces more natural and consistent results, but it does not always accurately replicate the lighting effects, as seen in the second image, where the neon lighting could have been better captured. IC-Light [122] is effective in applying realistic lighting, but tends to lose detail in some complex objects or faces under different light conditions. Overall, GPT-4o is a strong contender for the image re-lighting task, providing good light consistency but leaving room for improvement in some specific scenarios." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 495, + 531, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 495, + 531, + 551 + ], + "spans": [ + { + "bbox": [ + 76, + 495, + 531, + 551 + ], + "type": "text", + "content": "In summary, GPT-4o demonstrates strong performance in various low-level vision tasks, often surpassing Gemini 2.0 Flash in output quality with more natural and visually appealing results. However, it struggles with maintaining pixel consistency and avoiding undesired changes to image content, which are critical for tasks like restoration and inpainting. While its adaptability and realism are impressive, there is room for improvement in precision and task-specific consistency compared to specialized models like InstructIR and IC-Light."
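Since the discussion above repeatedly hinges on "pixel consistency" between input and output, it may help to make that criterion concrete. PSNR against the ground-truth clean image is one standard way to quantify it; treating PSNR as the metric here is an assumption, since the text itself does not name one.

```python
# Minimal sketch of a pixel-consistency check: PSNR between a restored image
# and the ground-truth clean image. PSNR as the metric is an assumption; the
# text only says pixel consistency is crucial for low-level restoration tasks.
import numpy as np

def psnr(reference: np.ndarray, restored: np.ndarray, peak: float = 255.0) -> float:
    """Peak signal-to-noise ratio in dB; higher means the restored image
    stays pixel-wise closer to the reference (identical images give inf)."""
    diff = reference.astype(np.float64) - restored.astype(np.float64)
    mse = float(np.mean(diff ** 2))
    if mse == 0.0:
        return float("inf")
    return 10.0 * np.log10(peak ** 2 / mse)
```

A model that repaints content it should leave untouched, as described for GPT-4o above, will score noticeably lower on such a check than a restoration-specific model like InstructIR that edits only the degraded signal.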
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 78, + 177, + 90 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 78, + 177, + 90 + ], + "spans": [ + { + "bbox": [ + 133, + 78, + 177, + 90 + ], + "type": "text", + "content": "Denoising" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 236, + 94, + 247, + 108 + ], + "blocks": [ + { + "bbox": [ + 236, + 94, + 247, + 108 + ], + "lines": [ + { + "bbox": [ + 236, + 94, + 247, + 108 + ], + "spans": [ + { + "bbox": [ + 236, + 94, + 247, + 108 + ], + "type": "image", + "image_path": "1a4731938b542638df6ad94c956073db2b548c15e4b1b8114d5307a10c99ea73.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 248, + 97, + 368, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 97, + 368, + 109 + ], + "spans": [ + { + "bbox": [ + 248, + 97, + 368, + 109 + ], + "type": "text", + "content": "Evaluation: Image Quality." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 114, + 202, + 204 + ], + "blocks": [ + { + "bbox": [ + 109, + 114, + 202, + 204 + ], + "lines": [ + { + "bbox": [ + 109, + 114, + 202, + 204 + ], + "spans": [ + { + "bbox": [ + 109, + 114, + 202, + 204 + ], + "type": "image", + "image_path": "0a89af2a4699039f927a01b35f74a721a10ddc2eb096385ea8edf582d6c03d50.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 208, + 114, + 301, + 204 + ], + "blocks": [ + { + "bbox": [ + 208, + 114, + 301, + 204 + ], + "lines": [ + { + "bbox": [ + 208, + 114, + 301, + 204 + ], + "spans": [ + { + "bbox": [ + 208, + 114, + 301, + 204 + ], + "type": "image", + "image_path": "1beb76da0b2a004a33d76279d80831d9cbb3917b5daee0d2aa13d05feb493089.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 308, + 114, + 402, + 204 + ], + "blocks": [ + { + "bbox": [ + 308, + 114, + 402, + 204 + ], + "lines": [ + { + "bbox": [ + 308, + 114, + 402, + 204 + ], + "spans": [ + { + "bbox": [ + 308, + 114, + 402, + 204 + ], + "type": "image", + "image_path": "174139ab6dcc12cd6a99f039cfe71445d47612cb61b456c34e139a4d44ce5b27.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 408, + 114, + 502, + 204 + ], + "blocks": [ + { + "bbox": [ + 408, + 114, + 502, + 204 + ], + "lines": [ + { + "bbox": [ + 408, + 114, + 502, + 204 + ], + "spans": [ + { + "bbox": [ + 408, + 114, + 502, + 204 + ], + "type": "image", + "image_path": "25c6f8568e7bcdc44e6309534f95b1a0b59bb18d7773a69a2d061ee65e297860.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 208, + 203, + 331 + ], + "blocks": [ + { + "bbox": [ + 109, + 208, + 203, + 331 + ], + "lines": [ + { + "bbox": [ + 109, + 208, + 203, + 331 + ], + "spans": [ + { + "bbox": [ + 109, + 208, + 203, + 331 + ], + "type": "image", + "image_path": 
"25046cf08634831335f437686635f7b1affeb5cf872dfd9bdeef73ad97a4f786.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 208, + 208, + 301, + 331 + ], + "blocks": [ + { + "bbox": [ + 208, + 208, + 301, + 331 + ], + "lines": [ + { + "bbox": [ + 208, + 208, + 301, + 331 + ], + "spans": [ + { + "bbox": [ + 208, + 208, + 301, + 331 + ], + "type": "image", + "image_path": "0a2a55819ee33dc9ba77d52891f0222654f300378e0c315f91df125d5feab4ef.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 308, + 208, + 402, + 331 + ], + "blocks": [ + { + "bbox": [ + 308, + 208, + 402, + 331 + ], + "lines": [ + { + "bbox": [ + 308, + 208, + 402, + 331 + ], + "spans": [ + { + "bbox": [ + 308, + 208, + 402, + 331 + ], + "type": "image", + "image_path": "34f3e69973949e582903e9064b4ccc8b319f75b359178b7208421be704e55d45.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 408, + 208, + 502, + 331 + ], + "blocks": [ + { + "bbox": [ + 408, + 208, + 502, + 331 + ], + "lines": [ + { + "bbox": [ + 408, + 208, + 502, + 331 + ], + "spans": [ + { + "bbox": [ + 408, + 208, + 502, + 331 + ], + "type": "image", + "image_path": "46dec341162765728e1fc8f0cd002ebf4ee74f97cd953e898c4270f10f2a5328.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 109, + 336, + 203, + 416 + ], + "blocks": [ + { + "bbox": [ + 109, + 336, + 203, + 416 + ], + "lines": [ + { + "bbox": [ + 109, + 336, + 203, + 416 + ], + "spans": [ + { + "bbox": [ + 109, + 336, + 203, + 416 + ], + "type": "image", + "image_path": "47297a815dfe05f13941cb00e869c4a19a92d4533649e2a583bafd94faf893c9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 208, + 336, + 301, + 418 + ], + "blocks": [ + { + "bbox": [ + 208, + 336, + 301, + 418 + ], + "lines": [ + { + "bbox": [ + 208, + 336, + 301, + 418 + ], + "spans": [ + { + "bbox": [ + 208, + 336, + 301, + 418 + ], + "type": "image", + "image_path": "9f7537d877ef9e59c4b7d2865b5b20522610acea27d5295f2b7a540a7f158aa1.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 308, + 336, + 402, + 418 + ], + "blocks": [ + { + "bbox": [ + 308, + 336, + 402, + 418 + ], + "lines": [ + { + "bbox": [ + 308, + 336, + 402, + 418 + ], + "spans": [ + { + "bbox": [ + 308, + 336, + 402, + 418 + ], + "type": "image", + "image_path": "4425fde2f8933a0d0a27e3c8934be18f51d1e042682ca256127948d03c7d8026.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 408, + 336, + 502, + 418 + ], + "blocks": [ + { + "bbox": [ + 408, + 336, + 502, + 418 + ], + "lines": [ + { + "bbox": [ + 408, + 336, + 502, + 418 + ], + "spans": [ + { + "bbox": [ + 408, + 336, + 502, + 418 + ], + "type": "image", + "image_path": "29f5e73b544db2cde87a7ae619bda23d4e11cc25cfb4269634f02ce791227353.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 109, + 422, + 203, + 495 + ], + "blocks": [ + { + "bbox": [ + 109, + 422, + 203, + 495 + ], + "lines": [ + { + "bbox": [ + 109, + 422, + 203, + 495 + ], + "spans": [ + { + 
"bbox": [ + 109, + 422, + 203, + 495 + ], + "type": "image", + "image_path": "6b407e152dd4b4cace6037d3643ed856c1e206632e4bbccf517c4fef9f21d5ff.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 663, + 533, + 730 + ], + "lines": [ + { + "bbox": [ + 77, + 663, + 533, + 730 + ], + "spans": [ + { + "bbox": [ + 77, + 663, + 533, + 730 + ], + "type": "text", + "content": "Figure 24: Task: image denoising, aiming to remove the noise information and obtain high-quality clear version. Setup: We compare GPT-4o with InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the denoised images. Observations: GPT-4o can restore high-quality denoised images. Except for the second image, where the noise cannot be completely removed, the other images are free from noise. However, for low-level tasks, GPT-4o does not maintain content consistency well — the background colors and object shapes in many images have changed, such as the background color in the first image and the floor in the fourth image." + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 208, + 422, + 301, + 495 + ], + "blocks": [ + { + "bbox": [ + 208, + 422, + 301, + 495 + ], + "lines": [ + { + "bbox": [ + 208, + 422, + 301, + 495 + ], + "spans": [ + { + "bbox": [ + 208, + 422, + 301, + 495 + ], + "type": "image", + "image_path": "180b6d2fbe08d1106795eea1f9a5165620b7ff33122c84b0e0c53f5d47bd52a9.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 308, + 422, + 402, + 495 + ], + "blocks": [ + { + "bbox": [ + 308, + 422, + 402, + 495 + ], + "lines": [ + { + "bbox": [ + 308, + 422, + 402, + 495 + ], + "spans": [ + { + "bbox": [ + 308, + 422, + 402, + 495 + ], + "type": "image", + "image_path": "4e2bd472765d312f5f2fa59f23f64b308cadfa60ddeaef7c74ca0effcf81c1de.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 408, + 422, + 502, + 495 + ], + "blocks": [ + { + "bbox": [ + 408, + 422, + 502, + 495 + ], + "lines": [ + { + "bbox": [ + 408, + 422, + 502, + 495 + ], + "spans": [ + { + "bbox": [ + 408, + 422, + 502, + 495 + ], + "type": "image", + "image_path": "345956a47d1ba18bc47ef420be35f2680ee92364e251cc4f0c85890f41d35a52.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 109, + 501, + 203, + 603 + ], + "blocks": [ + { + "bbox": [ + 109, + 501, + 203, + 603 + ], + "lines": [ + { + "bbox": [ + 109, + 501, + 203, + 603 + ], + "spans": [ + { + "bbox": [ + 109, + 501, + 203, + 603 + ], + "type": "image", + "image_path": "ea1923c868229c788db11eb4c4c0a0c7fdfff3244b7867c71f36ecc71509d140.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 208, + 501, + 301, + 603 + ], + "blocks": [ + { + "bbox": [ + 208, + 501, + 301, + 603 + ], + "lines": [ + { + "bbox": [ + 208, + 501, + 301, + 603 + ], + "spans": [ + { + "bbox": [ + 208, + 501, + 301, + 603 + ], + "type": "image", + "image_path": "0d95f3d9522c913d7fb63c52805b36252551d9de67bab8edf492afd706660e7d.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 308, + 501, + 402, + 603 + ], + "blocks": [ + { + "bbox": [ + 308, + 501, + 402, + 603 + ], + 
"lines": [ + { + "bbox": [ + 308, + 501, + 402, + 603 + ], + "spans": [ + { + "bbox": [ + 308, + 501, + 402, + 603 + ], + "type": "image", + "image_path": "2865da09f43ccfb7bad4c1a38f720f8a7f22179ea35f92a1539146184df21cd6.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 408, + 501, + 502, + 603 + ], + "blocks": [ + { + "bbox": [ + 408, + 501, + 502, + 603 + ], + "lines": [ + { + "bbox": [ + 408, + 501, + 502, + 603 + ], + "spans": [ + { + "bbox": [ + 408, + 501, + 502, + 603 + ], + "type": "image", + "image_path": "273a3c5b1d1bd94335184aafbed95bd03cde1025a92f44aa700ed255ea3c88e5.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 605, + 351, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 605, + 351, + 617 + ], + "spans": [ + { + "bbox": [ + 113, + 605, + 351, + 617 + ], + "type": "text", + "content": "Input Text: \"Remove the noise, make the image clear.\"" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 127, + 622, + 183, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 622, + 183, + 634 + ], + "spans": [ + { + "bbox": [ + 127, + 622, + 183, + 634 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 236, + 621, + 268, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 621, + 268, + 631 + ], + "spans": [ + { + "bbox": [ + 236, + 621, + 268, + 631 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 319, + 622, + 391, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 622, + 391, + 632 + ], + "spans": [ + { + "bbox": [ + 319, + 622, + 391, + 632 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 429, + 621, + 478, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 429, + 621, + 478, + 631 + ], + "spans": [ + { + "bbox": [ + 429, + 621, + 478, + 631 + ], + "type": "text", + "content": "InstructIR" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 151, + 166, + 164 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 151, + 166, + 164 + ], + "spans": [ + { + "bbox": [ + 120, + 151, + 166, + 164 + ], + "type": "text", + "content": "Deraining" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 237, + 170, + 381, + 184 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 170, + 381, + 184 + ], + "spans": [ + { + "bbox": [ + 237, + 170, + 381, + 184 + ], + "type": "text", + "content": "Evaluation: Image Quality." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 97, + 190, + 199, + 260 + ], + "blocks": [ + { + "bbox": [ + 97, + 190, + 199, + 260 + ], + "lines": [ + { + "bbox": [ + 97, + 190, + 199, + 260 + ], + "spans": [ + { + "bbox": [ + 97, + 190, + 199, + 260 + ], + "type": "image", + "image_path": "c33450ac171eaded049f91066f0deeba4af786415bc6dd5d653dc55c54b3ee41.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 203, + 190, + 304, + 260 + ], + "blocks": [ + { + "bbox": [ + 203, + 190, + 304, + 260 + ], + "lines": [ + { + "bbox": [ + 203, + 190, + 304, + 260 + ], + "spans": [ + { + "bbox": [ + 203, + 190, + 304, + 260 + ], + "type": "image", + "image_path": "e15b31d827df49078bd8ff4b5a3c0b7e9766b7e0ce33322d4b3e8fa24173ca40.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 190, + 411, + 260 + ], + "blocks": [ + { + "bbox": [ + 309, + 190, + 411, + 260 + ], + "lines": [ + { + "bbox": [ + 309, + 190, + 411, + 260 + ], + "spans": [ + { + "bbox": [ + 309, + 190, + 411, + 260 + ], + "type": "image", + "image_path": "4bd0fc1b08f65d2b90caeb19644fda48d0c14d56f7a3b6d5b0258145a86adade.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 416, + 190, + 517, + 260 + ], + "blocks": [ + { + "bbox": [ + 416, + 190, + 517, + 260 + ], + "lines": [ + { + "bbox": [ + 416, + 190, + 517, + 260 + ], + "spans": [ + { + "bbox": [ + 416, + 190, + 517, + 260 + ], + "type": "image", + "image_path": "dccfcb457f6ecc58a1354c97e2786be9af3ddba306e9c14119bc34b83281433c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 97, + 266, + 198, + 335 + ], + "blocks": [ + { + "bbox": [ + 97, + 266, + 198, + 335 + ], + "lines": [ + { + "bbox": [ + 97, + 266, + 198, + 335 + ], + "spans": [ + { + "bbox": [ + 97, + 266, + 198, + 335 + ], + "type": "image", + "image_path": "e7da05826e2092da3f8c786eaba5c6a6330ab3558f6720c639f754e67843897b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 203, + 266, + 304, + 334 + ], + "blocks": [ + { + "bbox": [ + 203, + 266, + 304, + 334 + ], + "lines": [ + { + "bbox": [ + 203, + 266, + 304, + 334 + ], + "spans": [ + { + "bbox": [ + 203, + 266, + 304, + 334 + ], + "type": "image", + "image_path": "bb6ced5f0c7f3239e75cdaf62739d7bd88f4e2dea5220e63e06fbbda019b0215.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 309, + 266, + 411, + 335 + ], + "blocks": [ + { + "bbox": [ + 309, + 266, + 411, + 335 + ], + "lines": [ + { + "bbox": [ + 309, + 266, + 411, + 335 + ], + "spans": [ + { + "bbox": [ + 309, + 266, + 411, + 335 + ], + "type": "image", + "image_path": "a0be173027e50abb4eeaa3a09379cf5495e4baeb4d07b8cfe3c3fbfc7f49da66.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 416, + 266, + 517, + 335 + ], + "blocks": [ + { + "bbox": [ + 416, + 266, + 517, + 335 + ], + "lines": [ + { + "bbox": [ + 416, + 266, + 517, + 335 + ], + "spans": [ + { + "bbox": [ + 416, + 266, + 517, + 335 + ], + "type": "image", + "image_path": "902fc2f31952ca02a07900b777fb797fe5708419dadaf611ebdbe7c0305895cd.jpg" + } + 
] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 97, + 341, + 199, + 408 + ], + "blocks": [ + { + "bbox": [ + 97, + 341, + 199, + 408 + ], + "lines": [ + { + "bbox": [ + 97, + 341, + 199, + 408 + ], + "spans": [ + { + "bbox": [ + 97, + 341, + 199, + 408 + ], + "type": "image", + "image_path": "53b1fed05e6b1e6eefadf734ccb916b0779e1b0b4d17f572e0bed7be77130d6c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 203, + 341, + 304, + 409 + ], + "blocks": [ + { + "bbox": [ + 203, + 341, + 304, + 409 + ], + "lines": [ + { + "bbox": [ + 203, + 341, + 304, + 409 + ], + "spans": [ + { + "bbox": [ + 203, + 341, + 304, + 409 + ], + "type": "image", + "image_path": "45630d882ee83bb0a62e1013d838d630afca2a25e99127c4ab251b2d79d6652d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 309, + 341, + 411, + 410 + ], + "blocks": [ + { + "bbox": [ + 309, + 341, + 411, + 410 + ], + "lines": [ + { + "bbox": [ + 309, + 341, + 411, + 410 + ], + "spans": [ + { + "bbox": [ + 309, + 341, + 411, + 410 + ], + "type": "image", + "image_path": "7e92a2b49a69f9d2edbf84340e1fa8c4dbc2cf0e60655a02c6d49818f70c5d3d.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 416, + 341, + 517, + 410 + ], + "blocks": [ + { + "bbox": [ + 416, + 341, + 517, + 410 + ], + "lines": [ + { + "bbox": [ + 416, + 341, + 517, + 410 + ], + "spans": [ + { + "bbox": [ + 416, + 341, + 517, + 410 + ], + "type": "image", + "image_path": "a8bf6e323c48a02f642dd0e8b8ab2bf45779e5f873173853590a980210b6b42b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 97, + 414, + 199, + 499 + ], + "blocks": [ + { + "bbox": [ + 97, + 414, + 199, + 499 + ], + "lines": [ + { + "bbox": [ + 97, + 414, + 199, + 499 + ], + "spans": [ + { + "bbox": [ + 97, + 414, + 199, + 499 + ], + "type": "image", + "image_path": "536e0b422f449656c91454149f3446fde812ec0f995fcf5bbe9e0a43e2d597fa.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 502, + 358, + 515 + ], + "lines": [ + { + "bbox": [ + 105, + 502, + 358, + 515 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 358, + 515 + ], + "type": "text", + "content": "Input Text: \"Remove the rain, make the image clear.\"" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 203, + 414, + 304, + 499 + ], + "blocks": [ + { + "bbox": [ + 203, + 414, + 304, + 499 + ], + "lines": [ + { + "bbox": [ + 203, + 414, + 304, + 499 + ], + "spans": [ + { + "bbox": [ + 203, + 414, + 304, + 499 + ], + "type": "image", + "image_path": "d900d2a2f4a7e03bc322a876b3c802692357c1fec98fb4a0bdfe6de558f8f44a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 558, + 533, + 647 + ], + "lines": [ + { + "bbox": [ + 77, + 558, + 533, + 647 + ], + "spans": [ + { + "bbox": [ + 77, + 558, + 533, + 647 + ], + "type": "text", + "content": "Figure 25: Task: image deraining, aiming to remove the rain streak and get high-quality clear version. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the derained images. 
Observations: Overall, GPT-4o performs well. However, the model struggles with maintaining content consistency in low-level visual details — for instance, the polar bear's background in the first image becomes unnaturally pink, and the underwater scene loses depth and clarity. The flowers also appear altered in color and arrangement. In contrast, InstructIR demonstrates the most consistent performance across all examples, effectively removing rain while preserving the original scene's structure, color, and composition. Overall, InstructIR is the most balanced and accurate model for image restoration in this comparison." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 309, + 414, + 411, + 499 + ], + "blocks": [ + { + "bbox": [ + 309, + 414, + 411, + 499 + ], + "lines": [ + { + "bbox": [ + 309, + 414, + 411, + 499 + ], + "spans": [ + { + "bbox": [ + 309, + 414, + 411, + 499 + ], + "type": "image", + "image_path": "4f7cd151bdab47a69f14cb0fefb2b225b07676d7de97906f3f09e608ae362d1f.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 416, + 414, + 517, + 500 + ], + "blocks": [ + { + "bbox": [ + 416, + 414, + 517, + 500 + ], + "lines": [ + { + "bbox": [ + 416, + 414, + 517, + 500 + ], + "spans": [ + { + "bbox": [ + 416, + 414, + 517, + 500 + ], + "type": "image", + "image_path": "a1cf0fd8a07f055d804221a00dac3ab6196ba65037b7980ae7d6ac1150ef8152.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 117, + 524, + 177, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 524, + 177, + 537 + ], + "spans": [ + { + "bbox": [ + 117, + 524, + 177, + 537 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 236, + 525, + 271, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 525, + 271, + 536 + ], + "spans": [ + { + "bbox": [ + 236, + 525, + 271, + 536 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 319, + 525, + 397, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 525, + 397, + 536 + ], + "spans": [ + { + "bbox": [ + 319, + 525, + 397, + 536 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 438, + 525, + 492, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 525, + 492, + 536 + ], + "spans": [ + { + "bbox": [ + 438, + 525, + 492, + 536 + ], + "type": "text", + "content": "InstructIR" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 171, + 165, + 183 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 171, + 165, + 183 + ], + "spans": [ + { + "bbox": [ + 121, + 171, + 165, + 183 + ], + "type": "text", + "content": "Dehazing" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 233, + 185, + 376, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { +
"bbox": [ + 233, + 185, + 376, + 201 + ], + "spans": [ + { + "bbox": [ + 233, + 185, + 376, + 201 + ], + "type": "text", + "content": "Evaluation: Image Quality." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 100, + 205, + 196, + 278 + ], + "blocks": [ + { + "bbox": [ + 100, + 205, + 196, + 278 + ], + "lines": [ + { + "bbox": [ + 100, + 205, + 196, + 278 + ], + "spans": [ + { + "bbox": [ + 100, + 205, + 196, + 278 + ], + "type": "image", + "image_path": "0b161ea7ba45146ed5301977dbc4153cddf37d20e4042c25caf795a6761d7b64.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 204, + 205, + 299, + 278 + ], + "blocks": [ + { + "bbox": [ + 204, + 205, + 299, + 278 + ], + "lines": [ + { + "bbox": [ + 204, + 205, + 299, + 278 + ], + "spans": [ + { + "bbox": [ + 204, + 205, + 299, + 278 + ], + "type": "image", + "image_path": "d9fef0de375f6e8e87b378aa29d3ee9d9b91df21b84ed0eb8eaf780d387eb2f7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 205, + 403, + 278 + ], + "blocks": [ + { + "bbox": [ + 307, + 205, + 403, + 278 + ], + "lines": [ + { + "bbox": [ + 307, + 205, + 403, + 278 + ], + "spans": [ + { + "bbox": [ + 307, + 205, + 403, + 278 + ], + "type": "image", + "image_path": "2f53f480f09632c6f3fce2f380303d7ddfff3462b4f02b3fbd217d4b98cd7bfb.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 414, + 205, + 509, + 278 + ], + "blocks": [ + { + "bbox": [ + 414, + 205, + 509, + 278 + ], + "lines": [ + { + "bbox": [ + 414, + 205, + 509, + 278 + ], + "spans": [ + { + "bbox": [ + 414, + 205, + 509, + 278 + ], + "type": "image", + "image_path": "1dcf8e8781f2caee3cccfcec05a958f963d7001ad1378b322b20be96f757857e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 100, + 288, + 196, + 369 + ], + "blocks": [ + { + "bbox": [ + 100, + 288, + 196, + 369 + ], + "lines": [ + { + "bbox": [ + 100, + 288, + 196, + 369 + ], + "spans": [ + { + "bbox": [ + 100, + 288, + 196, + 369 + ], + "type": "image", + "image_path": "192fef7db091bb823a304562be65f9026029dbec8cd7557e21fc725cf7a16820.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 539, + 533, + 627 + ], + "lines": [ + { + "bbox": [ + 77, + 539, + 533, + 627 + ], + "spans": [ + { + "bbox": [ + 77, + 539, + 533, + 627 + ], + "type": "text", + "content": "Figure 26: Task: image dehazing, aiming to remove the haze information and get high-quality clear version. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the dehazed images. Observations: GPT-4o performs moderately well in dehazing, managing to restore clearer structures and contrast in most scenes. However, its outputs often have a grayish or desaturated tone, especially visible in the second and third rows. Gemini 2.0 Flash produces more colorful results but tends to leave some haze behind, leading to a less crisp output. InstructIR outperforms both, offering the most visually natural and sharp dehazing across all examples while preserving original colors and details. Overall, InstructIR demonstrates the strongest capability in removing haze while maintaining realism." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 205, + 288, + 299, + 369 + ], + "blocks": [ + { + "bbox": [ + 205, + 288, + 299, + 369 + ], + "lines": [ + { + "bbox": [ + 205, + 288, + 299, + 369 + ], + "spans": [ + { + "bbox": [ + 205, + 288, + 299, + 369 + ], + "type": "image", + "image_path": "fa73cd36deac40a3a5de512bdd640d92cbdd918be0feb815b9659870541d646f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 308, + 288, + 403, + 369 + ], + "blocks": [ + { + "bbox": [ + 308, + 288, + 403, + 369 + ], + "lines": [ + { + "bbox": [ + 308, + 288, + 403, + 369 + ], + "spans": [ + { + "bbox": [ + 308, + 288, + 403, + 369 + ], + "type": "image", + "image_path": "1f7f8dc7c95580fa7127ddf404e77ee13666d31a7cb6fd3eb7e66943921a35a7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 414, + 288, + 509, + 369 + ], + "blocks": [ + { + "bbox": [ + 414, + 288, + 509, + 369 + ], + "lines": [ + { + "bbox": [ + 414, + 288, + 509, + 369 + ], + "spans": [ + { + "bbox": [ + 414, + 288, + 509, + 369 + ], + "type": "image", + "image_path": "2b91fde29b7d65bb3965853dad07b3b346f976065009ddf3ee693efe1429b9de.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 101, + 379, + 196, + 476 + ], + "blocks": [ + { + "bbox": [ + 101, + 379, + 196, + 476 + ], + "lines": [ + { + "bbox": [ + 101, + 379, + 196, + 476 + ], + "spans": [ + { + "bbox": [ + 101, + 379, + 196, + 476 + ], + "type": "image", + "image_path": "6487fa3903724c091ac75d960053975fd920521719237fbe538abf890e5b7d14.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 205, + 379, + 299, + 477 + ], + "blocks": [ + { + "bbox": [ + 205, + 379, + 299, + 477 + ], + "lines": [ + { + "bbox": [ + 205, + 379, + 299, + 477 + ], + "spans": [ + { + "bbox": [ + 205, + 379, + 299, + 477 + ], + "type": "image", + "image_path": "b57c9edcd60d270e665488237cb0979f4c4725a3a0c7e8682695c4bab96be517.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 308, + 379, + 403, + 477 + ], + "blocks": [ + { + "bbox": [ + 308, + 379, + 403, + 477 + ], + "lines": [ + { + "bbox": [ + 308, + 379, + 403, + 477 + ], + "spans": [ + { + "bbox": [ + 308, + 379, + 403, + 477 + ], + "type": "image", + "image_path": "68285fa202a5fdaa42f6a5b5c9a5ad11300451918903881288401c4c6d69b8ef.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 414, + 379, + 509, + 476 + ], + "blocks": [ + { + "bbox": [ + 414, + 379, + 509, + 476 + ], + "lines": [ + { + "bbox": [ + 414, + 379, + 509, + 476 + ], + "spans": [ + { + "bbox": [ + 414, + 379, + 509, + 476 + ], + "type": "image", + "image_path": "7dd57547ecd2c2e9b99488cd75ff2d9c6688588f611f0c7b10caea435b7cf9dd.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 103, + 479, + 436, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 479, + 436, + 493 + ], + "spans": [ + { + "bbox": [ + 103, + 479, + 436, + 493 + ], + "type": "text", + "content": "Input Text: \"I took this photo during a foggy 
day. Can you improve it?\"" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 119, + 498, + 179, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 498, + 179, + 511 + ], + "spans": [ + { + "bbox": [ + 119, + 498, + 179, + 511 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 232, + 499, + 267, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 499, + 267, + 510 + ], + "spans": [ + { + "bbox": [ + 232, + 499, + 267, + 510 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 499, + 392, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 499, + 392, + 510 + ], + "spans": [ + { + "bbox": [ + 315, + 499, + 392, + 510 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 434, + 499, + 487, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 434, + 499, + 487, + 510 + ], + "spans": [ + { + "bbox": [ + 434, + 499, + 487, + 510 + ], + "type": "text", + "content": "InstructIR" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "bbox": [ + 91, + 209, + 203, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 209, + 203, + 224 + ], + "spans": [ + { + "bbox": [ + 91, + 209, + 203, + 224 + ], + "type": "text", + "content": "Low-light Enhancement" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 227, + 232, + 359, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 232, + 359, + 248 + ], + "spans": [ + { + "bbox": [ + 227, + 232, + 359, + 248 + ], + "type": "text", + "content": "Evaluation: Consistency."
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 100, + 253, + 196, + 319 + ], + "blocks": [ + { + "bbox": [ + 100, + 253, + 196, + 319 + ], + "lines": [ + { + "bbox": [ + 100, + 253, + 196, + 319 + ], + "spans": [ + { + "bbox": [ + 100, + 253, + 196, + 319 + ], + "type": "image", + "image_path": "a84122365c616f0e518763e95e8748954778d1d6f37580aebe6d9fcdb6dbdefd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 203, + 253, + 299, + 319 + ], + "blocks": [ + { + "bbox": [ + 203, + 253, + 299, + 319 + ], + "lines": [ + { + "bbox": [ + 203, + 253, + 299, + 319 + ], + "spans": [ + { + "bbox": [ + 203, + 253, + 299, + 319 + ], + "type": "image", + "image_path": "a5bc8e52d7842f4252e539ba34adcea369c6d11bb3eaa72ea23966ff1c26f6e2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 252, + 403, + 317 + ], + "blocks": [ + { + "bbox": [ + 307, + 252, + 403, + 317 + ], + "lines": [ + { + "bbox": [ + 307, + 252, + 403, + 317 + ], + "spans": [ + { + "bbox": [ + 307, + 252, + 403, + 317 + ], + "type": "image", + "image_path": "c090907878b87d590ef3ced70007db5dbbe66202ce9b1e00122e2b8df532d6b6.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 410, + 252, + 507, + 317 + ], + "blocks": [ + { + "bbox": [ + 410, + 252, + 507, + 317 + ], + "lines": [ + { + "bbox": [ + 410, + 252, + 507, + 317 + ], + "spans": [ + { + "bbox": [ + 410, + 252, + 507, + 317 + ], + "type": "image", + "image_path": "995ae0408307d51c1fe4b22c7386e33f12f5f2de57aab5e0c882854a62a49b4a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 100, + 325, + 196, + 389 + ], + "blocks": [ + { + "bbox": [ + 100, + 325, + 196, + 389 + ], + "lines": [ + { + "bbox": [ + 100, + 325, + 196, + 389 + ], + "spans": [ + { + "bbox": [ + 100, + 325, + 196, + 389 + ], + "type": "image", + "image_path": "d9be1ffaecc96d29076914277e7c30d3db8c4735a052211f5df25b329509f3bf.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 524, + 533, + 591 + ], + "lines": [ + { + "bbox": [ + 77, + 524, + 533, + 591 + ], + "spans": [ + { + "bbox": [ + 77, + 524, + 533, + 591 + ], + "type": "text", + "content": "Figure 27: Task: low-light image enhancement, aiming to increase the brightness of the image to obtain a high brightness image. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the brightness images. Observations: In low-light enhancement tasks, GPT-4o can brighten images and recover basic visibility, but often introduces unnatural lighting and loses detail, especially in the second row, where the image remains overly dark. InstructIR consistently delivers the most balanced results, enhancing visibility while preserving true colors and textures, making it the best performer across all three examples." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 203, + 325, + 299, + 389 + ], + "blocks": [ + { + "bbox": [ + 203, + 325, + 299, + 389 + ], + "lines": [ + { + "bbox": [ + 203, + 325, + 299, + 389 + ], + "spans": [ + { + "bbox": [ + 203, + 325, + 299, + 389 + ], + "type": "image", + "image_path": "7c783389b6755b3395ca901bd7e2c2f77ed95615d85ca58ae5a381dd0a4cbd5e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 325, + 403, + 388 + ], + "blocks": [ + { + "bbox": [ + 307, + 325, + 403, + 388 + ], + "lines": [ + { + "bbox": [ + 307, + 325, + 403, + 388 + ], + "spans": [ + { + "bbox": [ + 307, + 325, + 403, + 388 + ], + "type": "image", + "image_path": "1a5b4b1774fdc23750f023c71c85c56ea578acf01ebde2ea68333a166999bf31.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 410, + 325, + 507, + 389 + ], + "blocks": [ + { + "bbox": [ + 410, + 325, + 507, + 389 + ], + "lines": [ + { + "bbox": [ + 410, + 325, + 507, + 389 + ], + "spans": [ + { + "bbox": [ + 410, + 325, + 507, + 389 + ], + "type": "image", + "image_path": "59b7eab0c07c081369aaeb8fc99ee1be38d0f888691576752368cada60c2dfd5.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 100, + 396, + 196, + 461 + ], + "blocks": [ + { + "bbox": [ + 100, + 396, + 196, + 461 + ], + "lines": [ + { + "bbox": [ + 100, + 396, + 196, + 461 + ], + "spans": [ + { + "bbox": [ + 100, + 396, + 196, + 461 + ], + "type": "image", + "image_path": "bdfaed21c06e793694872cf312fb039cb068ab6d61c502ad781bc11ec2d4c06a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 204, + 396, + 299, + 461 + ], + "blocks": [ + { + "bbox": [ + 204, + 396, + 299, + 461 + ], + "lines": [ + { + "bbox": [ + 204, + 396, + 299, + 461 + ], + "spans": [ + { + "bbox": [ + 204, + 396, + 299, + 461 + ], + "type": "image", + "image_path": "7ea976f449a8043690de06844c2b94aafce18ca58e6beea0e7e26544e3ab2899.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 307, + 396, + 403, + 460 + ], + "blocks": [ + { + "bbox": [ + 307, + 396, + 403, + 460 + ], + "lines": [ + { + "bbox": [ + 307, + 396, + 403, + 460 + ], + "spans": [ + { + "bbox": [ + 307, + 396, + 403, + 460 + ], + "type": "image", + "image_path": "c00e98505d0ec6544c9ab3f147c4ec2eb644ae9be7a7e5bf1a8d123c564b6686.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 410, + 396, + 507, + 461 + ], + "blocks": [ + { + "bbox": [ + 410, + 396, + 507, + 461 + ], + "lines": [ + { + "bbox": [ + 410, + 396, + 507, + 461 + ], + "spans": [ + { + "bbox": [ + 410, + 396, + 507, + 461 + ], + "type": "image", + "image_path": "2043ba7207ad0d9c159f8b1abd4cb8a7575070b2f4ddccfd301508c5cbc7ce5a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 464, + 484, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 464, + 484, + 479 + ], + "spans": [ + { + "bbox": [ + 106, + 464, + 484, + 479 + ], + "type": "text", + "content": "Input Text: \"I took My image is too dark, I 
cannot see anything. Can you fix it?\"" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 118, + 488, + 179, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 488, + 179, + 502 + ], + "spans": [ + { + "bbox": [ + 118, + 488, + 179, + 502 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 234, + 488, + 269, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 488, + 269, + 499 + ], + "spans": [ + { + "bbox": [ + 234, + 488, + 269, + 499 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 488, + 395, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 488, + 395, + 500 + ], + "spans": [ + { + "bbox": [ + 317, + 488, + 395, + 500 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 434, + 488, + 488, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 434, + 488, + 488, + 499 + ], + "spans": [ + { + "bbox": [ + 434, + 488, + 488, + 499 + ], + "type": "text", + "content": "InstructIR" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 184, + 168, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 184, + 168, + 198 + ], + "spans": [ + { + "bbox": [ + 120, + 184, + 168, + 198 + ], + "type": "text", + "content": "Deblurring" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 231, + 204, + 376, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 204, + 376, + 220 + ], + "spans": [ + { + "bbox": [ + 231, + 204, + 376, + 220 + ], + "type": "text", + "content": "Evaluation: Image Quality."
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 95, + 225, + 198, + 303 + ], + "blocks": [ + { + "bbox": [ + 95, + 225, + 198, + 303 + ], + "lines": [ + { + "bbox": [ + 95, + 225, + 198, + 303 + ], + "spans": [ + { + "bbox": [ + 95, + 225, + 198, + 303 + ], + "type": "image", + "image_path": "85f4dbb3e1869fc684c51a8d3da40afdad3cedebd548adaef1a8346cf9e23270.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 201, + 225, + 302, + 302 + ], + "blocks": [ + { + "bbox": [ + 201, + 225, + 302, + 302 + ], + "lines": [ + { + "bbox": [ + 201, + 225, + 302, + 302 + ], + "spans": [ + { + "bbox": [ + 201, + 225, + 302, + 302 + ], + "type": "image", + "image_path": "6b9df00e9d9d77d03f6e7d0327a679c41ab300965c8160ed8995d13466569736.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 306, + 225, + 408, + 302 + ], + "blocks": [ + { + "bbox": [ + 306, + 225, + 408, + 302 + ], + "lines": [ + { + "bbox": [ + 306, + 225, + 408, + 302 + ], + "spans": [ + { + "bbox": [ + 306, + 225, + 408, + 302 + ], + "type": "image", + "image_path": "37feb29d085233a01159a45ae5e03d6b57ff8f11c715ca55b4a77dfee3fc645b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 410, + 225, + 512, + 303 + ], + "blocks": [ + { + "bbox": [ + 410, + 225, + 512, + 303 + ], + "lines": [ + { + "bbox": [ + 410, + 225, + 512, + 303 + ], + "spans": [ + { + "bbox": [ + 410, + 225, + 512, + 303 + ], + "type": "image", + "image_path": "5d41134c1d56df0d9d4e13222e77a77b5332513dbf0e28ef10e50470295d17e3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 96, + 304, + 197, + 375 + ], + "blocks": [ + { + "bbox": [ + 96, + 304, + 197, + 375 + ], + "lines": [ + { + "bbox": [ + 96, + 304, + 197, + 375 + ], + "spans": [ + { + "bbox": [ + 96, + 304, + 197, + 375 + ], + "type": "image", + "image_path": "98924e97a312fd055c311dfce4f2f33c194a08959c53493dda6596e9bbacc31e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 201, + 304, + 302, + 375 + ], + "blocks": [ + { + "bbox": [ + 201, + 304, + 302, + 375 + ], + "lines": [ + { + "bbox": [ + 201, + 304, + 302, + 375 + ], + "spans": [ + { + "bbox": [ + 201, + 304, + 302, + 375 + ], + "type": "image", + "image_path": "5930d203da41b02946e5b564261b3a200c305a6a549a4ba7c3961c80697c16ce.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 304, + 304, + 408, + 375 + ], + "blocks": [ + { + "bbox": [ + 304, + 304, + 408, + 375 + ], + "lines": [ + { + "bbox": [ + 304, + 304, + 408, + 375 + ], + "spans": [ + { + "bbox": [ + 304, + 304, + 408, + 375 + ], + "type": "image", + "image_path": "c52ea48115f508872bd5b5ec87bc06b52622f4b6d3cea22c4f58d26b4d869481.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 410, + 304, + 512, + 375 + ], + "blocks": [ + { + "bbox": [ + 410, + 304, + 512, + 375 + ], + "lines": [ + { + "bbox": [ + 410, + 304, + 512, + 375 + ], + "spans": [ + { + "bbox": [ + 410, + 304, + 512, + 375 + ], + "type": "image", + "image_path": "3270cef07702d259baaf487526f2ae6da683fab7cc7fbfd29b04b26044a58e69.jpg" + } + 
] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 96, + 378, + 198, + 448 + ], + "blocks": [ + { + "bbox": [ + 96, + 378, + 198, + 448 + ], + "lines": [ + { + "bbox": [ + 96, + 378, + 198, + 448 + ], + "spans": [ + { + "bbox": [ + 96, + 378, + 198, + 448 + ], + "type": "image", + "image_path": "d25e5d34348f2c9916b8707da5ebf28418590c98d607597c3205410432cd39ab.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 99, + 451, + 485, + 477 + ], + "lines": [ + { + "bbox": [ + 99, + 451, + 485, + 477 + ], + "spans": [ + { + "bbox": [ + 99, + 451, + 485, + 477 + ], + "type": "text", + "content": "Input Text: \"I took this photo while I was running, can you stabilize the image? it is too blurry.\"" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 201, + 378, + 302, + 448 + ], + "blocks": [ + { + "bbox": [ + 201, + 378, + 302, + 448 + ], + "lines": [ + { + "bbox": [ + 201, + 378, + 302, + 448 + ], + "spans": [ + { + "bbox": [ + 201, + 378, + 302, + 448 + ], + "type": "image", + "image_path": "10e10b4b43504d485d1b89173a358ad873c0bb08494a06dc780f0f725b125906.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 523, + 533, + 613 + ], + "lines": [ + { + "bbox": [ + 77, + 523, + 533, + 613 + ], + "spans": [ + { + "bbox": [ + 77, + 523, + 533, + 613 + ], + "type": "text", + "content": "Figure 28: Task: image deblurring, aiming to remove the blur information to obtain a clear image. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the deblurred images. Observations: For motion deblurring, GPT-4o recovers some sharpness, especially in fine details like text or faces, but the content is not matched with the original image. Gemini 2.0 Flash sharpens the image slightly better in some cases but can introduce over-smoothing, making the result look artificial. InstructIR demonstrates the best deblurring performance overall — restoring clear edges, facial features, and text while maintaining natural textures. It consistently produces the most stable and visually convincing results across all examples." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 304, + 378, + 408, + 448 + ], + "blocks": [ + { + "bbox": [ + 304, + 378, + 408, + 448 + ], + "lines": [ + { + "bbox": [ + 304, + 378, + 408, + 448 + ], + "spans": [ + { + "bbox": [ + 304, + 378, + 408, + 448 + ], + "type": "image", + "image_path": "324c435155e439e0e4927509fb4bc07e46d67b68706e04a95c81c2c43616dade.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 410, + 378, + 512, + 448 + ], + "blocks": [ + { + "bbox": [ + 410, + 378, + 512, + 448 + ], + "lines": [ + { + "bbox": [ + 410, + 378, + 512, + 448 + ], + "spans": [ + { + "bbox": [ + 410, + 378, + 512, + 448 + ], + "type": "image", + "image_path": "0d5d4b908f4ff3dbdff9941212fc68ab04e87f2ef75d6228897f56880dd27fc6.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 117, + 486, + 178, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 486, + 178, + 500 + ], + "spans": [ + { + "bbox": [ + 117, + 486, + 178, + 500 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 232, + 486, + 268, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 486, + 268, + 498 + ], + "spans": [ + { + "bbox": [ + 232, + 486, + 268, + 498 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 486, + 395, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 486, + 395, + 498 + ], + "spans": [ + { + "bbox": [ + 316, + 486, + 395, + 498 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 433, + 486, + 488, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 433, + 486, + 488, + 498 + ], + "spans": [ + { + "bbox": [ + 433, + 486, + 488, + 498 + ], + "type": "text", + "content": "InstructIR" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 182, + 187, + 196 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 187, + 196 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 187, + 196 + ], + "type": "text", + "content": "Super-Resolution" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 231, + 194, + 376, + 212 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 194, + 376, + 212 + ], + "spans": [ + { + "bbox": [ + 231, + 194, + 376, + 212 + ], + "type": "text", + "content": "Evaluation: Image Quality." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 102, + 216, + 201, + 314 + ], + "blocks": [ + { + "bbox": [ + 102, + 216, + 201, + 314 + ], + "lines": [ + { + "bbox": [ + 102, + 216, + 201, + 314 + ], + "spans": [ + { + "bbox": [ + 102, + 216, + 201, + 314 + ], + "type": "image", + "image_path": "4f5025e7e13970ffe6ae4d344132af9a438277305e7345db9006ce618d0573fb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 206, + 216, + 304, + 313 + ], + "blocks": [ + { + "bbox": [ + 206, + 216, + 304, + 313 + ], + "lines": [ + { + "bbox": [ + 206, + 216, + 304, + 313 + ], + "spans": [ + { + "bbox": [ + 206, + 216, + 304, + 313 + ], + "type": "image", + "image_path": "16dc34668ed85bc66c78fa13a9e59e97bd1551b7dcf1f770849b1c6ec45313ab.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 216, + 408, + 313 + ], + "blocks": [ + { + "bbox": [ + 309, + 216, + 408, + 313 + ], + "lines": [ + { + "bbox": [ + 309, + 216, + 408, + 313 + ], + "spans": [ + { + "bbox": [ + 309, + 216, + 408, + 313 + ], + "type": "image", + "image_path": "536066035c978487014a28aff6357f8949e5085087a1135fefd97bd330b10ad5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 413, + 216, + 511, + 313 + ], + "blocks": [ + { + "bbox": [ + 413, + 216, + 511, + 313 + ], + "lines": [ + { + "bbox": [ + 413, + 216, + 511, + 313 + ], + "spans": [ + { + "bbox": [ + 413, + 216, + 511, + 313 + ], + "type": "image", + "image_path": "9bfe1fd24e221db430a1ef2c97a6b127a44b5d7b0c9d1975f8c8209268003e53.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 103, + 319, + 200, + 397 + ], + "blocks": [ + { + "bbox": [ + 103, + 319, + 200, + 397 + ], + "lines": [ + { + "bbox": [ + 103, + 319, + 200, + 397 + ], + "spans": [ + { + "bbox": [ + 103, + 319, + 200, + 397 + ], + "type": "image", + "image_path": "787c6e3871eb657b7ad2619b07c940f4253e90dda0a171100d9c16bb8ce5179e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 548, + 533, + 616 + ], + "lines": [ + { + "bbox": [ + 77, + 548, + 533, + 616 + ], + "spans": [ + { + "bbox": [ + 77, + 548, + 533, + 616 + ], + "type": "text", + "content": "Figure 29: Task: image super-resolution, aiming to improve the image resolution. Setup: We compare GPT-4o with established baselines such as InstructIR [20] and Gemini 2.0 Flash [99] to evaluate the deblurred images. Observations: In super-resolution, InstructIR delivers the most natural and detailed results across all examples—restoring fine edges in the card reader, realistic texture on the octopus, and sharp trees in the landscape. GPT-4o enhances clarity but misses details like the octopus surface and tree leaves. Gemini 2.0 Flash produces sharper outputs than GPT-4o but introduces unnatural textures and artifacts, especially in organic regions like the octopus and foliage." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 206, + 319, + 303, + 397 + ], + "blocks": [ + { + "bbox": [ + 206, + 319, + 303, + 397 + ], + "lines": [ + { + "bbox": [ + 206, + 319, + 303, + 397 + ], + "spans": [ + { + "bbox": [ + 206, + 319, + 303, + 397 + ], + "type": "image", + "image_path": "89139fed496336187890615f822656f053268a0928b6b8aad1304fa164575873.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 309, + 319, + 406, + 397 + ], + "blocks": [ + { + "bbox": [ + 309, + 319, + 406, + 397 + ], + "lines": [ + { + "bbox": [ + 309, + 319, + 406, + 397 + ], + "spans": [ + { + "bbox": [ + 309, + 319, + 406, + 397 + ], + "type": "image", + "image_path": "c817252ab152b2bdf4e05c1e86107c2ae73fac4e18e54c85852a65c86d6eb4ff.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 413, + 319, + 510, + 397 + ], + "blocks": [ + { + "bbox": [ + 413, + 319, + 510, + 397 + ], + "lines": [ + { + "bbox": [ + 413, + 319, + 510, + 397 + ], + "spans": [ + { + "bbox": [ + 413, + 319, + 510, + 397 + ], + "type": "image", + "image_path": "deeb960aa331c541bdfd82bbc98e2d7b1232c00088999d254c3e72f9a70ad184.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 103, + 403, + 200, + 474 + ], + "blocks": [ + { + "bbox": [ + 103, + 403, + 200, + 474 + ], + "lines": [ + { + "bbox": [ + 103, + 403, + 200, + 474 + ], + "spans": [ + { + "bbox": [ + 103, + 403, + 200, + 474 + ], + "type": "image", + "image_path": "4b7ed5536998e9b32588208e95ba79958f7565c5f1a32016788821d35ecd191a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 206, + 403, + 304, + 473 + ], + "blocks": [ + { + "bbox": [ + 206, + 403, + 304, + 473 + ], + "lines": [ + { + "bbox": [ + 206, + 403, + 304, + 473 + ], + "spans": [ + { + "bbox": [ + 206, + 403, + 304, + 473 + ], + "type": "image", + "image_path": "f91d6cbc924b18c4e72f2efd1f2c0ffe7a85d740967c0f696474bad835204b3e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 309, + 403, + 406, + 473 + ], + "blocks": [ + { + "bbox": [ + 309, + 403, + 406, + 473 + ], + "lines": [ + { + "bbox": [ + 309, + 403, + 406, + 473 + ], + "spans": [ + { + "bbox": [ + 309, + 403, + 406, + 473 + ], + "type": "image", + "image_path": "23c403764728be45b17ee5db54c9ec0e5a6b239c1402350ba72598d480c17fbc.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 413, + 403, + 510, + 473 + ], + "blocks": [ + { + "bbox": [ + 413, + 403, + 510, + 473 + ], + "lines": [ + { + "bbox": [ + 413, + 403, + 510, + 473 + ], + "spans": [ + { + "bbox": [ + 413, + 403, + 510, + 473 + ], + "type": "image", + "image_path": "7c6e1db18970fe110e86871683e72f4fceaeb6a8c3e0c16fe2dfb612916f8a04.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 478, + 497, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 497, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 497, + 503 + ], + "type": "text", + "content": "Input Text: \"Make my photo bigger and better. 
Add details to this image. Increase the resolution of this photo.\"" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 120, + 511, + 181, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 511, + 181, + 525 + ], + "spans": [ + { + "bbox": [ + 120, + 511, + 181, + 525 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 238, + 511, + 272, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 511, + 272, + 522 + ], + "spans": [ + { + "bbox": [ + 238, + 511, + 272, + 522 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 511, + 395, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 511, + 395, + 523 + ], + "spans": [ + { + "bbox": [ + 317, + 511, + 395, + 523 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 429, + 511, + 482, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 429, + 511, + 482, + 523 + ], + "spans": [ + { + "bbox": [ + 429, + 511, + 482, + 523 + ], + "type": "text", + "content": "InstructIR" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "bbox": [ + 119, + 149, + 170, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 149, + 170, + 163 + ], + "spans": [ + { + "bbox": [ + 119, + 149, + 170, + 163 + ], + "type": "text", + "content": "Inpainting" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 231, + 163, + 376, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 163, + 376, + 178 + ], + "spans": [ + { + "bbox": [ + 231, + 163, + 376, + 178 + ], + "type": "text", + "content": "Evaluation: Image Quality."
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 109, + 186, + 197, + 274 + ], + "blocks": [ + { + "bbox": [ + 109, + 186, + 197, + 274 + ], + "lines": [ + { + "bbox": [ + 109, + 186, + 197, + 274 + ], + "spans": [ + { + "bbox": [ + 109, + 186, + 197, + 274 + ], + "type": "image", + "image_path": "c9d161d0a7f9636e0f15f7cf8b38ef3cde507b88822b80d1c054fc17965684c8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 208, + 186, + 296, + 274 + ], + "blocks": [ + { + "bbox": [ + 208, + 186, + 296, + 274 + ], + "lines": [ + { + "bbox": [ + 208, + 186, + 296, + 274 + ], + "spans": [ + { + "bbox": [ + 208, + 186, + 296, + 274 + ], + "type": "image", + "image_path": "5d0d22f1be19f77f4671c9695622b0c8c720ea17567c646c8545d5c47f0261dd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 310, + 186, + 399, + 274 + ], + "blocks": [ + { + "bbox": [ + 310, + 186, + 399, + 274 + ], + "lines": [ + { + "bbox": [ + 310, + 186, + 399, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 186, + 399, + 274 + ], + "type": "image", + "image_path": "855b2a4ebf86a04b3d8e45ed6cc6bfe5358ad56960cbb002301c9715b3a3062e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 414, + 187, + 504, + 274 + ], + "blocks": [ + { + "bbox": [ + 414, + 187, + 504, + 274 + ], + "lines": [ + { + "bbox": [ + 414, + 187, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 414, + 187, + 504, + 274 + ], + "type": "image", + "image_path": "2a2ba40c70067c2bbfe87ba3140fb913b131f18ca0f6a0801c01555583398b16.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 113, + 277, + 436, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 277, + 436, + 290 + ], + "spans": [ + { + "bbox": [ + 113, + 277, + 436, + 290 + ], + "type": "text", + "content": "Input Text: \"Please inpainting the image, make it looks reasonable.\"" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 293, + 192, + 376 + ], + "blocks": [ + { + "bbox": [ + 109, + 293, + 192, + 376 + ], + "lines": [ + { + "bbox": [ + 109, + 293, + 192, + 376 + ], + "spans": [ + { + "bbox": [ + 109, + 293, + 192, + 376 + ], + "type": "image", + "image_path": "327aff3d754879bc07e69c135ac470cd76c2e5d5328c263cd495368496462207.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 206, + 295, + 291, + 377 + ], + "blocks": [ + { + "bbox": [ + 206, + 295, + 291, + 377 + ], + "lines": [ + { + "bbox": [ + 206, + 295, + 291, + 377 + ], + "spans": [ + { + "bbox": [ + 206, + 295, + 291, + 377 + ], + "type": "image", + "image_path": "fbb3f36e987742956eb393471e0d4822531b76d7cb4cd749008a8301e0d7abaf.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 309, + 295, + 400, + 378 + ], + "blocks": [ + { + "bbox": [ + 309, + 295, + 400, + 378 + ], + "lines": [ + { + "bbox": [ + 309, + 295, + 400, + 378 + ], + "spans": [ + { + "bbox": [ + 309, + 295, + 400, + 378 + ], + "type": "image", + "image_path": "8da79e08847cb47bf66249a4079de84e4d65ff7744d90920fa8a900bbdbaed53.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": 
"image", + "bbox": [ + 413, + 295, + 503, + 378 + ], + "blocks": [ + { + "bbox": [ + 413, + 295, + 503, + 378 + ], + "lines": [ + { + "bbox": [ + 413, + 295, + 503, + 378 + ], + "spans": [ + { + "bbox": [ + 413, + 295, + 503, + 378 + ], + "type": "image", + "image_path": "d238501a0cfe7a41a5b2875bb5e131d9ad25dd839d448e2d25bc5ab1fcd60ac6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 379, + 438, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 379, + 438, + 392 + ], + "spans": [ + { + "bbox": [ + 114, + 379, + 438, + 392 + ], + "type": "text", + "content": "Input Text: \"Please inpainting the image, make it looks reasonable.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 109, + 399, + 194, + 487 + ], + "blocks": [ + { + "bbox": [ + 109, + 399, + 194, + 487 + ], + "lines": [ + { + "bbox": [ + 109, + 399, + 194, + 487 + ], + "spans": [ + { + "bbox": [ + 109, + 399, + 194, + 487 + ], + "type": "image", + "image_path": "0f8a4c52b6db3fbfb6fe2b7d165d3273b2849687f1ef570d6810290c516025db.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 207, + 399, + 291, + 487 + ], + "blocks": [ + { + "bbox": [ + 207, + 399, + 291, + 487 + ], + "lines": [ + { + "bbox": [ + 207, + 399, + 291, + 487 + ], + "spans": [ + { + "bbox": [ + 207, + 399, + 291, + 487 + ], + "type": "image", + "image_path": "2f732805f2b0034e45b6e06d068adc0a84b73d56f50fb84d71c1d98da5382f36.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 310, + 400, + 399, + 488 + ], + "blocks": [ + { + "bbox": [ + 310, + 400, + 399, + 488 + ], + "lines": [ + { + "bbox": [ + 310, + 400, + 399, + 488 + ], + "spans": [ + { + "bbox": [ + 310, + 400, + 399, + 488 + ], + "type": "image", + "image_path": "92d34852cfd83335f95aa56035188712f19e68bf3f090085cf6440a634a2b66a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 414, + 399, + 503, + 487 + ], + "blocks": [ + { + "bbox": [ + 414, + 399, + 503, + 487 + ], + "lines": [ + { + "bbox": [ + 414, + 399, + 503, + 487 + ], + "spans": [ + { + "bbox": [ + 414, + 399, + 503, + 487 + ], + "type": "image", + "image_path": "82a5f5ec145a93551e9693a7e4e48cedd5a986546f72da6f448a7c2c57ab92ee.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 112, + 490, + 508, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 490, + 508, + 514 + ], + "spans": [ + { + "bbox": [ + 112, + 490, + 508, + 514 + ], + "type": "text", + "content": "Input Text: \"Inpaint the missing part of the face in the image, making the restored area look natural and seamless.\"" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 123, + 524, + 183, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 524, + 183, + 537 + ], + "spans": [ + { + "bbox": [ + 123, + 524, + 183, + 537 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 234, + 524, + 269, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 524, + 269, + 536 + ], + "spans": [ + { + "bbox": [ + 234, + 524, + 269, + 536 + ], + "type": "text", + "content": "GPT40" + } + ] + } + ], + "index": 18 + }, + { + 
"bbox": [ + 316, + 525, + 394, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 525, + 394, + 536 + ], + "spans": [ + { + "bbox": [ + 316, + 525, + 394, + 536 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 436, + 524, + 492, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 436, + 524, + 492, + 535 + ], + "spans": [ + { + "bbox": [ + 436, + 524, + 492, + 535 + ], + "type": "text", + "content": "LatentPaint" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 77, + 561, + 532, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 561, + 532, + 649 + ], + "spans": [ + { + "bbox": [ + 77, + 561, + 532, + 649 + ], + "type": "text", + "content": "Figure 30: Task: Image inpainting, aiming to restore missing or masked regions in an image to appear natural and consistent with the context. Setup: We compare GPT-4o with baselines such as Gemini 2.0 Flash [99] and LatentPaint [22], evaluating their ability to fill in masked regions realistically. Observations: GPT-4o produces plausible completions but often lacks fine structure and texture alignment—e.g., the bricks in the first row appear flat and misaligned. Gemini 2.0 Flash generates more visually coherent textures, especially in natural scenes like the second row, but can introduce slight over-smoothing. LatentPaint performs the best, accurately reconstructing facial details and complex textures such as hair and expression in the third row, demonstrating superior semantic understanding and visual consistency." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "bbox": [ + 143, + 77, + 192, + 89 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 77, + 192, + 89 + ], + "spans": [ + { + "bbox": [ + 143, + 77, + 192, + 89 + ], + "type": "text", + "content": "Outpainting" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 248, + 90, + 258, + 102 + ], + "blocks": [ + { + "bbox": [ + 248, + 90, + 258, + 102 + ], + "lines": [ + { + "bbox": [ + 248, + 90, + 258, + 102 + ], + "spans": [ + { + "bbox": [ + 248, + 90, + 258, + 102 + ], + "type": "image", + "image_path": "842316d408aba300ecb8a4b27bfcbba0807e6bf4b66c38430aceb0e69a3016e2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 260, + 92, + 370, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 92, + 370, + 104 + ], + "spans": [ + { + "bbox": [ + 260, + 92, + 370, + 104 + ], + "type": "text", + "content": "Evaluation: Image Quality." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 133, + 107, + 212, + 190 + ], + "blocks": [ + { + "bbox": [ + 133, + 107, + 212, + 190 + ], + "lines": [ + { + "bbox": [ + 133, + 107, + 212, + 190 + ], + "spans": [ + { + "bbox": [ + 133, + 107, + 212, + 190 + ], + "type": "image", + "image_path": "ecc01e85a79c76a542d72a3c3811916efd3af7089be84ce35914d93646b60cbd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 221, + 108, + 306, + 191 + ], + "blocks": [ + { + "bbox": [ + 221, + 108, + 306, + 191 + ], + "lines": [ + { + "bbox": [ + 221, + 108, + 306, + 191 + ], + "spans": [ + { + "bbox": [ + 221, + 108, + 306, + 191 + ], + "type": "image", + "image_path": "63bf7481f04803305e25b194d0f6a798e29e4ad8347b13488f22af38472f7dbb.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 313, + 108, + 397, + 191 + ], + "blocks": [ + { + "bbox": [ + 313, + 108, + 397, + 191 + ], + "lines": [ + { + "bbox": [ + 313, + 108, + 397, + 191 + ], + "spans": [ + { + "bbox": [ + 313, + 108, + 397, + 191 + ], + "type": "image", + "image_path": "16e0aacca78d0ddc7841c44b3666005097467f6eb11e4ce97e8f4b2ead39c10b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 403, + 109, + 487, + 191 + ], + "blocks": [ + { + "bbox": [ + 403, + 109, + 487, + 191 + ], + "lines": [ + { + "bbox": [ + 403, + 109, + 487, + 191 + ], + "spans": [ + { + "bbox": [ + 403, + 109, + 487, + 191 + ], + "type": "image", + "image_path": "e739ed6e7d2e595e03926be7ac6c7b1fb3bd3badc49f246dca21b01655f93b1c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 193, + 484, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 193, + 484, + 235 + ], + "spans": [ + { + "bbox": [ + 138, + 193, + 484, + 235 + ], + "type": "text", + "content": "Input Text: \"Inpainting this image: a classic dark brown leather Chesterfield loveseat with tufted detailing and rolled arms. 
It sits in a cozy, traditionally styled living room with green walls, framed artwork, and warm lighting, creating an elegant and vintage atmosphere.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 130, + 236, + 214, + 319 + ], + "blocks": [ + { + "bbox": [ + 130, + 236, + 214, + 319 + ], + "lines": [ + { + "bbox": [ + 130, + 236, + 214, + 319 + ], + "spans": [ + { + "bbox": [ + 130, + 236, + 214, + 319 + ], + "type": "image", + "image_path": "62b37976bf23d0f522572b1d32ef2041be79cd68b5ed5fc0163a0fe71955417f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 221, + 236, + 306, + 319 + ], + "blocks": [ + { + "bbox": [ + 221, + 236, + 306, + 319 + ], + "lines": [ + { + "bbox": [ + 221, + 236, + 306, + 319 + ], + "spans": [ + { + "bbox": [ + 221, + 236, + 306, + 319 + ], + "type": "image", + "image_path": "96f8443e9e1d95f4d7b78542413a7daa8e652b264b9a0a1fb5f13e9ccdf11c2d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 312, + 236, + 397, + 319 + ], + "blocks": [ + { + "bbox": [ + 312, + 236, + 397, + 319 + ], + "lines": [ + { + "bbox": [ + 312, + 236, + 397, + 319 + ], + "spans": [ + { + "bbox": [ + 312, + 236, + 397, + 319 + ], + "type": "image", + "image_path": "dd6454a272ee52c994805637b87e75f5966a46cdbd76435fa0cf210c205b0168.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 403, + 236, + 487, + 319 + ], + "blocks": [ + { + "bbox": [ + 403, + 236, + 487, + 319 + ], + "lines": [ + { + "bbox": [ + 403, + 236, + 487, + 319 + ], + "spans": [ + { + "bbox": [ + 403, + 236, + 487, + 319 + ], + "type": "image", + "image_path": "2d141aa5b85e10194f8145486f1551abb222659e93a01d192127766df99e63e9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 136, + 320, + 477, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 320, + 477, + 352 + ], + "spans": [ + { + "bbox": [ + 136, + 320, + 477, + 352 + ], + "type": "text", + "content": "Input Text: \"Extend the image to the left and right with a realistic continuation of the street, sidewalk, and background buildings. Maintain consistent lighting, shadows, and overall style.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 130, + 353, + 214, + 436 + ], + "blocks": [ + { + "bbox": [ + 130, + 353, + 214, + 436 + ], + "lines": [ + { + "bbox": [ + 130, + 353, + 214, + 436 + ], + "spans": [ + { + "bbox": [ + 130, + 353, + 214, + 436 + ], + "type": "image", + "image_path": "d6cb4c9f05457cd35c0effca1f6abd864d75f43df91491467710d0bc3721a83c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 625, + 533, + 734 + ], + "lines": [ + { + "bbox": [ + 77, + 625, + 533, + 734 + ], + "spans": [ + { + "bbox": [ + 77, + 625, + 533, + 734 + ], + "type": "text", + "content": "Figure 31: Task: Image outpainting, aiming to extend the visual content of an image beyond its original boundaries coherently and realistically. Setup: We compare GPT-4o with Gemini 2.0 Flash [99], and some Specialized outpainting methods (SGT+ [116], StrDiffusion [66] and Dream360 [1]), evaluating their ability to extend content while maintaining visual consistency in lighting, texture, and semantics. 
Observations: The Specialized outpainting methods consistently produces the most coherent extensions — for example, it accurately maintains the room's lighting and decor in the first row, continues architectural lines and street perspective in the second, and creates seamless snowy landscapes in the third. GPT-4o offers plausible structure but often lacks fine detail and texture continuity, such as mismatched snow gradients or missing shadows. Gemini 2.0 Flash performs slightly better in semantic extension than GPT-4o but can introduce lighting inconsistencies and abrupt transitions, particularly in wide scenes like the desert in the final row." + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 221, + 353, + 306, + 435 + ], + "blocks": [ + { + "bbox": [ + 221, + 353, + 306, + 435 + ], + "lines": [ + { + "bbox": [ + 221, + 353, + 306, + 435 + ], + "spans": [ + { + "bbox": [ + 221, + 353, + 306, + 435 + ], + "type": "image", + "image_path": "88aa98d1328c5f92799e7830df3cc592e78892873c7d450de34f064e06cadf11.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 312, + 353, + 397, + 435 + ], + "blocks": [ + { + "bbox": [ + 312, + 353, + 397, + 435 + ], + "lines": [ + { + "bbox": [ + 312, + 353, + 397, + 435 + ], + "spans": [ + { + "bbox": [ + 312, + 353, + 397, + 435 + ], + "type": "image", + "image_path": "dd2dae5038367f85e716992705228d85cfed4fff5609bcaadfd9baa3415e36b6.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 403, + 353, + 487, + 435 + ], + "blocks": [ + { + "bbox": [ + 403, + 353, + 487, + 435 + ], + "lines": [ + { + "bbox": [ + 403, + 353, + 487, + 435 + ], + "spans": [ + { + "bbox": [ + 403, + 353, + 487, + 435 + ], + "type": "image", + "image_path": "a25788560611502b56750b8036243b4afafc5fde1438409b033768440c793002.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 137, + 437, + 472, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 437, + 472, + 469 + ], + "spans": [ + { + "bbox": [ + 137, + 437, + 472, + 469 + ], + "type": "text", + "content": "Input Text: \"Extend the image to the left and right, filling the black areas with a natural continuation of the snowy mountain landscape, ski path, trees, and sky. 
Keep the lighting, shadows, and textures consistent with the original image.\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 130, + 470, + 214, + 552 + ], + "blocks": [ + { + "bbox": [ + 130, + 470, + 214, + 552 + ], + "lines": [ + { + "bbox": [ + 130, + 470, + 214, + 552 + ], + "spans": [ + { + "bbox": [ + 130, + 470, + 214, + 552 + ], + "type": "image", + "image_path": "999497601ef88fdd1c39e3f4a28faa7e03a78e030514867e7db3e4a3be244eff.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 221, + 470, + 306, + 552 + ], + "blocks": [ + { + "bbox": [ + 221, + 470, + 306, + 552 + ], + "lines": [ + { + "bbox": [ + 221, + 470, + 306, + 552 + ], + "spans": [ + { + "bbox": [ + 221, + 470, + 306, + 552 + ], + "type": "image", + "image_path": "5fd37721ff6283442746e2e6bca6d7639893b65c0c252601a4e8feeaec442651.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 312, + 470, + 397, + 552 + ], + "blocks": [ + { + "bbox": [ + 312, + 470, + 397, + 552 + ], + "lines": [ + { + "bbox": [ + 312, + 470, + 397, + 552 + ], + "spans": [ + { + "bbox": [ + 312, + 470, + 397, + 552 + ], + "type": "image", + "image_path": "899da298140f306785ebb56f86b92a0e2dfa83b94136c6477ed20cbea1efa3fe.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 403, + 470, + 487, + 552 + ], + "blocks": [ + { + "bbox": [ + 403, + 470, + 487, + 552 + ], + "lines": [ + { + "bbox": [ + 403, + 470, + 487, + 552 + ], + "spans": [ + { + "bbox": [ + 403, + 470, + 487, + 552 + ], + "type": "image", + "image_path": "80963c1dbf62d49450d0bd1cbdf60c75eaa325a5af95ee28f199c5cd0699b838.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 133, + 554, + 466, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 554, + 466, + 597 + ], + "spans": [ + { + "bbox": [ + 133, + 554, + 466, + 597 + ], + "type": "text", + "content": "Input Text: \"Outpaint the center of this panoramic image to naturally connect the left and right desert landscape. 
Fill the middle area with a realistic continuation of the rocky desert terrain and blue sky with clouds, ensuring seamless blending and consistent perspective.\"" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 148, + 600, + 201, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 600, + 201, + 611 + ], + "spans": [ + { + "bbox": [ + 148, + 600, + 201, + 611 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 249, + 601, + 279, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 601, + 279, + 610 + ], + "spans": [ + { + "bbox": [ + 249, + 601, + 279, + 610 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 601, + 387, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 601, + 387, + 610 + ], + "spans": [ + { + "bbox": [ + 320, + 601, + 387, + 610 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 424, + 601, + 470, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 424, + 601, + 470, + 610 + ], + "spans": [ + { + "bbox": [ + 424, + 601, + 470, + 610 + ], + "type": "text", + "content": "Dream 360" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 116, + 124, + 173, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 124, + 173, + 135 + ], + "spans": [ + { + "bbox": [ + 116, + 124, + 173, + 135 + ], + "type": "text", + "content": "Colorization" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 227, + 138, + 372, + 153 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 138, + 372, + 153 + ], + "spans": [ + { + "bbox": [ + 227, + 138, + 372, + 153 + ], + "type": "text", + "content": "Evaluation: Image Quality." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 96, + 158, + 194, + 224 + ], + "blocks": [ + { + "bbox": [ + 96, + 158, + 194, + 224 + ], + "lines": [ + { + "bbox": [ + 96, + 158, + 194, + 224 + ], + "spans": [ + { + "bbox": [ + 96, + 158, + 194, + 224 + ], + "type": "image", + "image_path": "a545885da447cbcefa593efa1163db12a6ecaa2cdbbf6e96a524f3a907d3724c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 200, + 158, + 298, + 224 + ], + "blocks": [ + { + "bbox": [ + 200, + 158, + 298, + 224 + ], + "lines": [ + { + "bbox": [ + 200, + 158, + 298, + 224 + ], + "spans": [ + { + "bbox": [ + 200, + 158, + 298, + 224 + ], + "type": "image", + "image_path": "b33e1d9b8d191b6badbe6994b410d984583d93bb0eec0eb614f21170a2cdb7d5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 304, + 158, + 403, + 224 + ], + "blocks": [ + { + "bbox": [ + 304, + 158, + 403, + 224 + ], + "lines": [ + { + "bbox": [ + 304, + 158, + 403, + 224 + ], + "spans": [ + { + "bbox": [ + 304, + 158, + 403, + 224 + ], + "type": "image", + "image_path": "b0d254da72b338ef668b468732303651a5ecbf992ea9575f30ca06355f2c2edc.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 413, + 158, + 512, + 224 + ], + "blocks": [ + { + "bbox": [ + 413, + 158, + 512, + 224 + ], + "lines": [ + { + "bbox": [ + 413, + 158, + 512, + 224 + ], + "spans": [ + { + "bbox": [ + 413, + 158, + 512, + 224 + ], + "type": "image", + "image_path": "0bad4b8a9e257c2529bf84609f3ee26b5086573d0af51df270d4d92513151ff4.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 102, + 229, + 423, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 229, + 423, + 243 + ], + "spans": [ + { + "bbox": [ + 102, + 229, + 423, + 243 + ], + "type": "text", + "content": "Input Text: \"Colorize it: a red car parked on a cobblestone street.\"" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 96, + 249, + 194, + 316 + ], + "blocks": [ + { + "bbox": [ + 96, + 249, + 194, + 316 + ], + "lines": [ + { + "bbox": [ + 96, + 249, + 194, + 316 + ], + "spans": [ + { + "bbox": [ + 96, + 249, + 194, + 316 + ], + "type": "image", + "image_path": "3520766d4bd31fe95534ccef2679e0d3237151e64a06cfbdbcc03285013be219.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 200, + 250, + 299, + 316 + ], + "blocks": [ + { + "bbox": [ + 200, + 250, + 299, + 316 + ], + "lines": [ + { + "bbox": [ + 200, + 250, + 299, + 316 + ], + "spans": [ + { + "bbox": [ + 200, + 250, + 299, + 316 + ], + "type": "image", + "image_path": "5daddf44b2febe0e6103c50d850ffed87ab4624a8da3303ae0a44eb83321b1ed.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 304, + 250, + 403, + 316 + ], + "blocks": [ + { + "bbox": [ + 304, + 250, + 403, + 316 + ], + "lines": [ + { + "bbox": [ + 304, + 250, + 403, + 316 + ], + "spans": [ + { + "bbox": [ + 304, + 250, + 403, + 316 + ], + "type": "image", + "image_path": "82452cfd1770b08e14ee6fdce267d83915ea906cdd31384da9f70cf3e1cf38c3.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", 
+ "bbox": [ + 413, + 249, + 511, + 316 + ], + "blocks": [ + { + "bbox": [ + 413, + 249, + 511, + 316 + ], + "lines": [ + { + "bbox": [ + 413, + 249, + 511, + 316 + ], + "spans": [ + { + "bbox": [ + 413, + 249, + 511, + 316 + ], + "type": "image", + "image_path": "001d30c3201ae2f4664bf8ee499b66e8df811fc307c2c93884ed70982fa958ca.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 102, + 319, + 490, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 319, + 490, + 345 + ], + "spans": [ + { + "bbox": [ + 102, + 319, + 490, + 345 + ], + "type": "text", + "content": "Input Text: \"Colorize it: a couple of white and black kittens that are sitting in the purple grass.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 96, + 346, + 194, + 412 + ], + "blocks": [ + { + "bbox": [ + 96, + 346, + 194, + 412 + ], + "lines": [ + { + "bbox": [ + 96, + 346, + 194, + 412 + ], + "spans": [ + { + "bbox": [ + 96, + 346, + 194, + 412 + ], + "type": "image", + "image_path": "e0ea300e9865c0a0b5c3f4003da7770db1b8cd75b5f648d2fb6667866807f1bb.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 200, + 346, + 298, + 412 + ], + "blocks": [ + { + "bbox": [ + 200, + 346, + 298, + 412 + ], + "lines": [ + { + "bbox": [ + 200, + 346, + 298, + 412 + ], + "spans": [ + { + "bbox": [ + 200, + 346, + 298, + 412 + ], + "type": "image", + "image_path": "2aea31980297b8bd9effa16d06efa9dd2a95f1e6a7b7a99dc48e7bb6e4ed6a27.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 304, + 346, + 403, + 412 + ], + "blocks": [ + { + "bbox": [ + 304, + 346, + 403, + 412 + ], + "lines": [ + { + "bbox": [ + 304, + 346, + 403, + 412 + ], + "spans": [ + { + "bbox": [ + 304, + 346, + 403, + 412 + ], + "type": "image", + "image_path": "cc99e1fe7082b4516a42bfa326f45cb5f3fae6ae0f81c321344103d47d80cbb9.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 413, + 346, + 511, + 413 + ], + "blocks": [ + { + "bbox": [ + 413, + 346, + 511, + 413 + ], + "lines": [ + { + "bbox": [ + 413, + 346, + 511, + 413 + ], + "spans": [ + { + "bbox": [ + 413, + 346, + 511, + 413 + ], + "type": "image", + "image_path": "8f533a46531ffe4ae8d62f47eb7fc7615dbfab620a667fb4d1593863f2cb286f.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 102, + 415, + 450, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 415, + 450, + 426 + ], + "spans": [ + { + "bbox": [ + 102, + 415, + 450, + 426 + ], + "type": "text", + "content": "Input Text: \"Colorize it: a red sports car parked on the side of a street.\"" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 99, + 430, + 196, + 528 + ], + "blocks": [ + { + "bbox": [ + 99, + 430, + 196, + 528 + ], + "lines": [ + { + "bbox": [ + 99, + 430, + 196, + 528 + ], + "spans": [ + { + "bbox": [ + 99, + 430, + 196, + 528 + ], + "type": "image", + "image_path": "d10a34e202b94e2de1a4f2d1cd215f5be631f1577e3a8ebeac90787c1852e8e9.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 586, + 532, + 674 + ], + "lines": [ + { + "bbox": [ + 77, + 586, + 532, + 674 + ], + "spans": [ + { + "bbox": [ + 77, + 586, + 532, + 674 
+ ], + "type": "text", + "content": "Figure 32: Task: Image colorization, aiming to add realistic and semantically consistent color to grayscale images based on textual prompts. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and CtrlColor [59], focusing on their ability to follow instructions and produce visually natural colorized outputs. Observations: CtrlColor performs the best overall, generating vivid and accurate colors that precisely match the prompts—such as green lips and yellow sunglasses in the last row, or the purple grass and kitten hues in the second. GPT-4o provides reasonably faithful colorization but often lacks richness or misinterprets tones (e.g., slightly dull red in the third row or inconsistent purple grass). Gemini 2.0 Flash is more vivid than GPT-4o but tends to oversaturate or produce stylized effects, especially on human features." + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 203, + 430, + 301, + 527 + ], + "blocks": [ + { + "bbox": [ + 203, + 430, + 301, + 527 + ], + "lines": [ + { + "bbox": [ + 203, + 430, + 301, + 527 + ], + "spans": [ + { + "bbox": [ + 203, + 430, + 301, + 527 + ], + "type": "image", + "image_path": "ab3420504662b7f8244c018e079e1cffd82ab918262a3443ff4a8a16e52e66fe.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 307, + 430, + 405, + 527 + ], + "blocks": [ + { + "bbox": [ + 307, + 430, + 405, + 527 + ], + "lines": [ + { + "bbox": [ + 307, + 430, + 405, + 527 + ], + "spans": [ + { + "bbox": [ + 307, + 430, + 405, + 527 + ], + "type": "image", + "image_path": "45547d6bbb7a37e719ea603008ab1f1389806043fdae0a8ed425ac866ed4b27c.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 415, + 430, + 514, + 527 + ], + "blocks": [ + { + "bbox": [ + 415, + 430, + 514, + 527 + ], + "lines": [ + { + "bbox": [ + 415, + 430, + 514, + 527 + ], + "spans": [ + { + "bbox": [ + 415, + 430, + 514, + 527 + ], + "type": "image", + "image_path": "cec51b759ae2fd86f4a6cd9245c40a9af755b055698dd1da10fcbcf1565849ae.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 529, + 471, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 471, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 471, + 543 + ], + "type": "text", + "content": "Input Text: \"Colorize it: a woman wearing a yellow sunglasses with green lips\"" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 116, + 549, + 176, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 549, + 176, + 562 + ], + "spans": [ + { + "bbox": [ + 116, + 549, + 176, + 562 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 231, + 549, + 266, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 549, + 266, + 559 + ], + "spans": [ + { + "bbox": [ + 231, + 549, + 266, + 559 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 315, + 549, + 392, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 549, + 392, + 559 + ], + "spans": [ + { + "bbox": [ + 315, + 549, + 392, + 559 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ 
+ 444, + 549, + 488, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 444, + 549, + 488, + 559 + ], + "spans": [ + { + "bbox": [ + 444, + 549, + 488, + 559 + ], + "type": "text", + "content": "CtrlColor" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 121, + 184, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 184, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 184, + 133 + ], + "type": "text", + "content": "Shadow Removal" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 236, + 138, + 249, + 152 + ], + "blocks": [ + { + "bbox": [ + 236, + 138, + 249, + 152 + ], + "lines": [ + { + "bbox": [ + 236, + 138, + 249, + 152 + ], + "spans": [ + { + "bbox": [ + 236, + 138, + 249, + 152 + ], + "type": "image", + "image_path": "cfa4811e58b535cdfd35ac61baf40239638c41912743eed71869ca83aded9682.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 250, + 141, + 381, + 154 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 141, + 381, + 154 + ], + "spans": [ + { + "bbox": [ + 250, + 141, + 381, + 154 + ], + "type": "text", + "content": "Evaluation: Image Quality." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 100, + 157, + 203, + 236 + ], + "blocks": [ + { + "bbox": [ + 100, + 157, + 203, + 236 + ], + "lines": [ + { + "bbox": [ + 100, + 157, + 203, + 236 + ], + "spans": [ + { + "bbox": [ + 100, + 157, + 203, + 236 + ], + "type": "image", + "image_path": "e533493affd9962f40a067f07476309f50b38f805e9b7e4cb111bd1d65a85975.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 203, + 157, + 306, + 236 + ], + "blocks": [ + { + "bbox": [ + 203, + 157, + 306, + 236 + ], + "lines": [ + { + "bbox": [ + 203, + 157, + 306, + 236 + ], + "spans": [ + { + "bbox": [ + 203, + 157, + 306, + 236 + ], + "type": "image", + "image_path": "51cc0442254dbc7d3906e354e679e7c97663cf26955df12ad376ac2c249891a4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 306, + 157, + 414, + 236 + ], + "blocks": [ + { + "bbox": [ + 306, + 157, + 414, + 236 + ], + "lines": [ + { + "bbox": [ + 306, + 157, + 414, + 236 + ], + "spans": [ + { + "bbox": [ + 306, + 157, + 414, + 236 + ], + "type": "image", + "image_path": "038bd8bcbb952c2d249e6b969e11edf6c615ea7d80b89e647965ce83d7b12973.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 416, + 157, + 520, + 236 + ], + "blocks": [ + { + "bbox": [ + 416, + 157, + 520, + 236 + ], + "lines": [ + { + "bbox": [ + 416, + 157, + 520, + 236 + ], + "spans": [ + { + "bbox": [ + 416, + 157, + 520, + 236 + ], + "type": "image", + "image_path": "5d3c5fa7d9c2e13ae59f463d54f3c900277b032d69c97c5dd725781b8fc54200.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 100, + 239, + 
202, + 317 + ], + "blocks": [ + { + "bbox": [ + 100, + 239, + 202, + 317 + ], + "lines": [ + { + "bbox": [ + 100, + 239, + 202, + 317 + ], + "spans": [ + { + "bbox": [ + 100, + 239, + 202, + 317 + ], + "type": "image", + "image_path": "8280c76d3783095ac597ffe6112e8e97d4768a43158ffbeaed59287bec81116b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 575, + 533, + 676 + ], + "lines": [ + { + "bbox": [ + 77, + 575, + 533, + 676 + ], + "spans": [ + { + "bbox": [ + 77, + 575, + 533, + 676 + ], + "type": "text", + "content": "Figure 33: Task: Shadow removal, aiming to eliminate harsh shadows while preserving the integrity of the scene, textures, and lighting balance. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and ShadowRefiner [25] to evaluate how well each method removes shadows and retains original object fidelity and lighting consistency. Observations: ShadowRefiner consistently achieves the most natural and effective shadow removal. It produces even, diffuse lighting across all scenes—e.g., softening shadows without distorting textures in complex scenes like the miniatures and dog portrait. Gemini 2.0 Flash removes shadows reasonably but occasionally leaves faint traces or flattens contrast, as seen in the second and fourth rows. GPT-4o shows stronger shadow reduction than Gemini 2.0 Flash but sometimes alters surface brightness or loses detail fidelity. ShadowRefiner best preserves the original color tones and textures while eliminating harsh shadows." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 203, + 239, + 309, + 318 + ], + "blocks": [ + { + "bbox": [ + 203, + 239, + 309, + 318 + ], + "lines": [ + { + "bbox": [ + 203, + 239, + 309, + 318 + ], + "spans": [ + { + "bbox": [ + 203, + 239, + 309, + 318 + ], + "type": "image", + "image_path": "abb6c191cd8f3dbc58255f85b9eca1ff0ecbe31840d418b03345458fcd36ab20.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 239, + 413, + 318 + ], + "blocks": [ + { + "bbox": [ + 310, + 239, + 413, + 318 + ], + "lines": [ + { + "bbox": [ + 310, + 239, + 413, + 318 + ], + "spans": [ + { + "bbox": [ + 310, + 239, + 413, + 318 + ], + "type": "image", + "image_path": "898c8f34d0da327db20aba6653163e933c3b881344fb81df6b0f416dff0a65df.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 416, + 239, + 519, + 317 + ], + "blocks": [ + { + "bbox": [ + 416, + 239, + 519, + 317 + ], + "lines": [ + { + "bbox": [ + 416, + 239, + 519, + 317 + ], + "spans": [ + { + "bbox": [ + 416, + 239, + 519, + 317 + ], + "type": "image", + "image_path": "1bef728ebb6bc7f9c952867864380f4412a3ff879334ec345c1602238ad09c54.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 100, + 318, + 202, + 396 + ], + "blocks": [ + { + "bbox": [ + 100, + 318, + 202, + 396 + ], + "lines": [ + { + "bbox": [ + 100, + 318, + 202, + 396 + ], + "spans": [ + { + "bbox": [ + 100, + 318, + 202, + 396 + ], + "type": "image", + "image_path": "51b49e12056077fe1afb554b56beba3c707955dbdefb32d0bdf28a7d9683ab6a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 204, + 318, + 309, + 397 + ], + "blocks": [ + { + "bbox": [ + 204, + 
318, + 309, + 397 + ], + "lines": [ + { + "bbox": [ + 204, + 318, + 309, + 397 + ], + "spans": [ + { + "bbox": [ + 204, + 318, + 309, + 397 + ], + "type": "image", + "image_path": "e53143ad29978bfb50a47d639d116221922ece9fa13749ad760e4f2ca7cbecbf.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 310, + 318, + 413, + 397 + ], + "blocks": [ + { + "bbox": [ + 310, + 318, + 413, + 397 + ], + "lines": [ + { + "bbox": [ + 310, + 318, + 413, + 397 + ], + "spans": [ + { + "bbox": [ + 310, + 318, + 413, + 397 + ], + "type": "image", + "image_path": "aa0187c33d100a82b28f2505575ea9e419bbb4b364bc8feea7bf801423e56d3a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 416, + 318, + 519, + 396 + ], + "blocks": [ + { + "bbox": [ + 416, + 318, + 519, + 396 + ], + "lines": [ + { + "bbox": [ + 416, + 318, + 519, + 396 + ], + "spans": [ + { + "bbox": [ + 416, + 318, + 519, + 396 + ], + "type": "image", + "image_path": "e391d4c6d98af6fcc61c7f1d8e60d08040956f96e2c5d61ef9f3aac4d730f7a0.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 100, + 399, + 204, + 479 + ], + "blocks": [ + { + "bbox": [ + 100, + 399, + 204, + 479 + ], + "lines": [ + { + "bbox": [ + 100, + 399, + 204, + 479 + ], + "spans": [ + { + "bbox": [ + 100, + 399, + 204, + 479 + ], + "type": "image", + "image_path": "c159dda7b72f6548e3a35745859137667be43c3d3382e30061df21040584ebd3.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 205, + 399, + 309, + 479 + ], + "blocks": [ + { + "bbox": [ + 205, + 399, + 309, + 479 + ], + "lines": [ + { + "bbox": [ + 205, + 399, + 309, + 479 + ], + "spans": [ + { + "bbox": [ + 205, + 399, + 309, + 479 + ], + "type": "image", + "image_path": "3fda08a0f5b089580c34cfd81ff50f99e0d4660564c4d9f7d8ce3de3d5c373c0.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 310, + 399, + 414, + 479 + ], + "blocks": [ + { + "bbox": [ + 310, + 399, + 414, + 479 + ], + "lines": [ + { + "bbox": [ + 310, + 399, + 414, + 479 + ], + "spans": [ + { + "bbox": [ + 310, + 399, + 414, + 479 + ], + "type": "image", + "image_path": "af45afc3aa4a8340ccb4e22deeda2a924e0315faecedd77d56e667ae464a5ec2.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 416, + 399, + 518, + 479 + ], + "blocks": [ + { + "bbox": [ + 416, + 399, + 518, + 479 + ], + "lines": [ + { + "bbox": [ + 416, + 399, + 518, + 479 + ], + "spans": [ + { + "bbox": [ + 416, + 399, + 518, + 479 + ], + "type": "image", + "image_path": "f7a549c39bdf0880255cebc34e0049aef8e949afe607ba2d7ca38743e5ee21e8.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 483, + 506, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 506, + 521 + ], + "type": "text", + "content": "Input Text: \"Remove all harsh shadows from the image. Make the lighting even and soft across the entire scene. Preserve all objects, colors, and details exactly as they are. 
Make it look like it was taken under diffuse studio lighting.\"" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 122, + 533, + 184, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 533, + 184, + 548 + ], + "spans": [ + { + "bbox": [ + 122, + 533, + 184, + 548 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 233, + 534, + 269, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 534, + 269, + 545 + ], + "spans": [ + { + "bbox": [ + 233, + 534, + 269, + 545 + ], + "type": "text", + "content": "GPT40" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 320, + 534, + 399, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 534, + 399, + 545 + ], + "spans": [ + { + "bbox": [ + 320, + 534, + 399, + 545 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 435, + 534, + 507, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 435, + 534, + 507, + 545 + ], + "spans": [ + { + "bbox": [ + 435, + 534, + 507, + 545 + ], + "type": "text", + "content": "ShadowRefiner" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "bbox": [ + 97, + 78, + 191, + 89 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 78, + 191, + 89 + ], + "spans": [ + { + "bbox": [ + 97, + 78, + 191, + 89 + ], + "type": "text", + "content": "Reflection Removal" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 228, + 100, + 241, + 115 + ], + "blocks": [ + { + "bbox": [ + 228, + 100, + 241, + 115 + ], + "lines": [ + { + "bbox": [ + 228, + 100, + 241, + 115 + ], + "spans": [ + { + "bbox": [ + 228, + 100, + 241, + 115 + ], + "type": "image", + "image_path": "a92950a6ffd6c5949ed67dbcd8ef27f7a4455d1b77975ddb550332fdae784d04.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 242, + 104, + 372, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 104, + 372, + 117 + ], + "spans": [ + { + "bbox": [ + 242, + 104, + 372, + 117 + ], + "type": "text", + "content": "Evaluation: Image Quality." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 103, + 124, + 197, + 201 + ], + "blocks": [ + { + "bbox": [ + 103, + 124, + 197, + 201 + ], + "lines": [ + { + "bbox": [ + 103, + 124, + 197, + 201 + ], + "spans": [ + { + "bbox": [ + 103, + 124, + 197, + 201 + ], + "type": "image", + "image_path": "26dcc1fa3b38be01934c8e24ea493d1a80141258eb76a34f67d30c186588e554.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 207, + 125, + 301, + 202 + ], + "blocks": [ + { + "bbox": [ + 207, + 125, + 301, + 202 + ], + "lines": [ + { + "bbox": [ + 207, + 125, + 301, + 202 + ], + "spans": [ + { + "bbox": [ + 207, + 125, + 301, + 202 + ], + "type": "image", + "image_path": "8f1ec7ef5d643c0bfda5c6dcee55e55fbae700384aa8d8ff28e6f695f19abd73.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 125, + 405, + 203 + ], + "blocks": [ + { + "bbox": [ + 310, + 125, + 405, + 203 + ], + "lines": [ + { + "bbox": [ + 310, + 125, + 405, + 203 + ], + "spans": [ + { + "bbox": [ + 310, + 125, + 405, + 203 + ], + "type": "image", + "image_path": "e8c0506441fcc7c04d242543b53f3d7af01afae300d6edb7c5571fbe34319a8c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 415, + 125, + 508, + 203 + ], + "blocks": [ + { + "bbox": [ + 415, + 125, + 508, + 203 + ], + "lines": [ + { + "bbox": [ + 415, + 125, + 508, + 203 + ], + "spans": [ + { + "bbox": [ + 415, + 125, + 508, + 203 + ], + "type": "image", + "image_path": "fce6882ef70a409cc3043ec9739951fd8feb6eec25edb8d9c56a80dda6894b5f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 204, + 496, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 204, + 496, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 496, + 229 + ], + "type": "text", + "content": "Input Text: \"Remove window reflections, preserve interior details clearly visible through the glass, maintain natural lighting and perspective, photo-realistic result.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 103, + 232, + 197, + 310 + ], + "blocks": [ + { + "bbox": [ + 103, + 232, + 197, + 310 + ], + "lines": [ + { + "bbox": [ + 103, + 232, + 197, + 310 + ], + "spans": [ + { + "bbox": [ + 103, + 232, + 197, + 310 + ], + "type": "image", + "image_path": "a39507e46b3ffd1bbdf0ff1d20f603e166fb2c83a3cb4b81c304a49bc56d055e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 206, + 232, + 300, + 310 + ], + "blocks": [ + { + "bbox": [ + 206, + 232, + 300, + 310 + ], + "lines": [ + { + "bbox": [ + 206, + 232, + 300, + 310 + ], + "spans": [ + { + "bbox": [ + 206, + 232, + 300, + 310 + ], + "type": "image", + "image_path": "d780d806748e7a2e2d245f4bb75612e3a332fab55a1cc6b0fa0ad203f17dd8b5.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 311, + 232, + 405, + 310 + ], + "blocks": [ + { + "bbox": [ + 311, + 232, + 405, + 310 + ], + "lines": [ + { + "bbox": [ + 311, + 232, + 405, + 310 + ], + "spans": [ + { + "bbox": [ + 311, + 232, + 405, + 310 + ], + "type": "image", + "image_path": "293cc3e02dece56450be7a6542b0151c4dcffe5012347d53906f237826b4a55e.jpg" + } + ] + 
} + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 415, + 232, + 508, + 310 + ], + "blocks": [ + { + "bbox": [ + 415, + 232, + 508, + 310 + ], + "lines": [ + { + "bbox": [ + 415, + 232, + 508, + 310 + ], + "spans": [ + { + "bbox": [ + 415, + 232, + 508, + 310 + ], + "type": "image", + "image_path": "a3941e54ef0c8ca433e936496f30948b94b4e9f8ef2115b981275376d66cf6b7.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 97, + 313, + 515, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 313, + 515, + 339 + ], + "spans": [ + { + "bbox": [ + 97, + 313, + 515, + 339 + ], + "type": "text", + "content": "Input Text: \"Remove the reflection of buildings on the wet ground surface, make it look like a clean and dry textured concrete floor, realistic lighting and natural color tones.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 103, + 342, + 197, + 432 + ], + "blocks": [ + { + "bbox": [ + 103, + 342, + 197, + 432 + ], + "lines": [ + { + "bbox": [ + 103, + 342, + 197, + 432 + ], + "spans": [ + { + "bbox": [ + 103, + 342, + 197, + 432 + ], + "type": "image", + "image_path": "2997ae5bbb03c3903ad7d778e996f50f9bff8f0a1ab2e783b344083f30f3cce3.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 623, + 533, + 723 + ], + "lines": [ + { + "bbox": [ + 77, + 623, + 533, + 723 + ], + "spans": [ + { + "bbox": [ + 77, + 623, + 533, + 723 + ], + "type": "text", + "content": "Figure 34: Task: Reflection removal, aiming to eliminate unwanted reflections from transparent or reflective surfaces while preserving original content and realistic lighting. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and DSIT [39], assessing their ability to remove reflections while maintaining scene realism, texture fidelity, and lighting consistency. Observations: DSIT shows the most effective and natural reflection removal across all examples. It restores interior visibility through windows (e.g., bed and car interior) while preserving lighting and geometry. Gemini 2.0 Flash removes some reflections but often leaves faded traces or dulls textures, especially on glass doors and wet pavement. GPT-4o performs better than Gemini 2.0 Flash in preserving background details but sometimes alters color tones and sharpness. Overall, DSIT provides the cleanest and most photorealistic results, especially for transparent surfaces like glass and reflective wet ground." 
+ } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 206, + 342, + 300, + 432 + ], + "blocks": [ + { + "bbox": [ + 206, + 342, + 300, + 432 + ], + "lines": [ + { + "bbox": [ + 206, + 342, + 300, + 432 + ], + "spans": [ + { + "bbox": [ + 206, + 342, + 300, + 432 + ], + "type": "image", + "image_path": "145f3c5298c9786c810dc961b48e4e79d1a401610fd467498400ef8e5de81c24.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 311, + 342, + 405, + 432 + ], + "blocks": [ + { + "bbox": [ + 311, + 342, + 405, + 432 + ], + "lines": [ + { + "bbox": [ + 311, + 342, + 405, + 432 + ], + "spans": [ + { + "bbox": [ + 311, + 342, + 405, + 432 + ], + "type": "image", + "image_path": "517e9f2a30d20e6e67d1e7cc8d3275c188d4ba9fe1e3f526c5ea127a17812855.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 414, + 342, + 508, + 432 + ], + "blocks": [ + { + "bbox": [ + 414, + 342, + 508, + 432 + ], + "lines": [ + { + "bbox": [ + 414, + 342, + 508, + 432 + ], + "spans": [ + { + "bbox": [ + 414, + 342, + 508, + 432 + ], + "type": "image", + "image_path": "5d871e96ad190ac8245ba7e97f4356f76317189e597d17530c30c48b2ef31192.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 101, + 434, + 514, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 434, + 514, + 460 + ], + "spans": [ + { + "bbox": [ + 101, + 434, + 514, + 460 + ], + "type": "text", + "content": "Input Text: \"Remove reflections from the glass doors, make the interior clearly visible with natural lighting and sharp details, keep the golden door frame realistic and intact.\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 104, + 464, + 197, + 540 + ], + "blocks": [ + { + "bbox": [ + 104, + 464, + 197, + 540 + ], + "lines": [ + { + "bbox": [ + 104, + 464, + 197, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 197, + 540 + ], + "type": "image", + "image_path": "4d9a72541da86687fce106cdee64c185f7126d2f2e035a9623361561c2006136.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 206, + 464, + 300, + 539 + ], + "blocks": [ + { + "bbox": [ + 206, + 464, + 300, + 539 + ], + "lines": [ + { + "bbox": [ + 206, + 464, + 300, + 539 + ], + "spans": [ + { + "bbox": [ + 206, + 464, + 300, + 539 + ], + "type": "image", + "image_path": "06b68c2bd1d1e1e567e71454157f92909dcb91789d0b950ba4f61dd09198910d.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 311, + 464, + 405, + 539 + ], + "blocks": [ + { + "bbox": [ + 311, + 464, + 405, + 539 + ], + "lines": [ + { + "bbox": [ + 311, + 464, + 405, + 539 + ], + "spans": [ + { + "bbox": [ + 311, + 464, + 405, + 539 + ], + "type": "image", + "image_path": "1f17bc39103c2c1bcbdf239933f68441984fad34a4c78c96991f6e9a246af36a.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 415, + 464, + 508, + 539 + ], + "blocks": [ + { + "bbox": [ + 415, + 464, + 508, + 539 + ], + "lines": [ + { + "bbox": [ + 415, + 464, + 508, + 539 + ], + "spans": [ + { + "bbox": [ + 415, + 464, + 508, + 539 + ], + "type": "image", + 
"image_path": "6bf819c8a7dad12a034e9854583b927e93387ef6c95c36f44d5e4a7ebbd9eef8.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 102, + 541, + 496, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 541, + 496, + 578 + ], + "spans": [ + { + "bbox": [ + 102, + 541, + 496, + 578 + ], + "type": "text", + "content": "Input Text: \"Remove reflections from the car window, make the interior of the vehicle clearly visible, preserve natural lighting and realistic textures, keep the car frame untouched.\"" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 117, + 587, + 178, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 587, + 178, + 600 + ], + "spans": [ + { + "bbox": [ + 117, + 587, + 178, + 600 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 233, + 587, + 268, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 587, + 268, + 598 + ], + "spans": [ + { + "bbox": [ + 233, + 587, + 268, + 598 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 319, + 586, + 398, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 586, + 398, + 598 + ], + "spans": [ + { + "bbox": [ + 319, + 586, + 398, + 598 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 449, + 586, + 477, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 449, + 586, + 477, + 598 + ], + "spans": [ + { + "bbox": [ + 449, + 586, + 477, + 598 + ], + "type": "text", + "content": "DSIT" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 80, + 214, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 80, + 214, + 92 + ], + "spans": [ + { + "bbox": [ + 132, + 80, + 214, + 92 + ], + "type": "text", + "content": "Image Re-lightning" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 247, + 94, + 259, + 106 + ], + "blocks": [ + { + "bbox": [ + 247, + 94, + 259, + 106 + ], + "lines": [ + { + "bbox": [ + 247, + 94, + 259, + 106 + ], + "spans": [ + { + "bbox": [ + 247, + 94, + 259, + 106 + ], + "type": "image", + "image_path": "1a9a2fe4848dccc2cbdc43a21645b7d9153e4a783d4634d91cd4b7240855ca79.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 260, + 96, + 387, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 96, + 387, + 108 + ], + "spans": [ + { + "bbox": [ + 260, + 96, + 387, + 108 + ], + "type": "text", + "content": "Evaluation: Light consistency." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 129, + 106, + 194, + 171 + ], + "blocks": [ + { + "bbox": [ + 129, + 106, + 194, + 171 + ], + "lines": [ + { + "bbox": [ + 129, + 106, + 194, + 171 + ], + "spans": [ + { + "bbox": [ + 129, + 106, + 194, + 171 + ], + "type": "image", + "image_path": "303aadcf1f413c53fe51a10f2bbdb1d2f38eaebbbf1bafbaf65caa46e66c8863.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 199, + 108, + 262, + 171 + ], + "blocks": [ + { + "bbox": [ + 199, + 108, + 262, + 171 + ], + "lines": [ + { + "bbox": [ + 199, + 108, + 262, + 171 + ], + "spans": [ + { + "bbox": [ + 199, + 108, + 262, + 171 + ], + "type": "image", + "image_path": "73a85fa3c615d1faa5d51ccafc720c7d1d72730add58f7b85b6f81378602781d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 269, + 108, + 339, + 171 + ], + "blocks": [ + { + "bbox": [ + 269, + 108, + 339, + 171 + ], + "lines": [ + { + "bbox": [ + 269, + 108, + 339, + 171 + ], + "spans": [ + { + "bbox": [ + 269, + 108, + 339, + 171 + ], + "type": "image", + "image_path": "89185a1d1d4373656d69efacb10271368adb3f6ab4aa29bb7ce10ccd11ba70a6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 345, + 108, + 414, + 171 + ], + "blocks": [ + { + "bbox": [ + 345, + 108, + 414, + 171 + ], + "lines": [ + { + "bbox": [ + 345, + 108, + 414, + 171 + ], + "spans": [ + { + "bbox": [ + 345, + 108, + 414, + 171 + ], + "type": "image", + "image_path": "33aeab41543cb1f610d264106b611a2050ccfaaf21583d7c7034d3b1ef94d142.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 420, + 108, + 482, + 171 + ], + "blocks": [ + { + "bbox": [ + 420, + 108, + 482, + 171 + ], + "lines": [ + { + "bbox": [ + 420, + 108, + 482, + 171 + ], + "spans": [ + { + "bbox": [ + 420, + 108, + 482, + 171 + ], + "type": "image", + "image_path": "1186cfc667444853a5ea105221e7465b842983362ddd848dab0f81318dea818f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 133, + 173, + 274, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 173, + 274, + 184 + ], + "spans": [ + { + "bbox": [ + 133, + 173, + 274, + 184 + ], + "type": "text", + "content": "Input Text: \"Given two input images:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 133, + 184, + 334, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 184, + 334, + 194 + ], + "spans": [ + { + "bbox": [ + 133, + 184, + 334, + 194 + ], + "type": "text", + "content": "Image 1: A classical marble statue in neutral lighting." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 133, + 194, + 402, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 194, + 402, + 204 + ], + "spans": [ + { + "bbox": [ + 133, + 194, + 402, + 204 + ], + "type": "text", + "content": "Image 2: A city street at night illuminated by neon pink and blue lights." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 133, + 203, + 473, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 203, + 473, + 222 + ], + "spans": [ + { + "bbox": [ + 133, + 203, + 473, + 222 + ], + "type": "text", + "content": "Please generate a relit version of the statue from Image 1, as if it were lit by the lighting conditions of Image 2." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 221, + 473, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 221, + 473, + 243 + ], + "spans": [ + { + "bbox": [ + 132, + 221, + 473, + 243 + ], + "type": "text", + "content": "The result should preserve the details and pose of the statue but apply realistic colored lighting and shadows consistent with the vibrant, mixed neon lighting of the second image." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 139, + 245, + 179, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 245, + 179, + 256 + ], + "spans": [ + { + "bbox": [ + 139, + 245, + 179, + 256 + ], + "type": "text", + "content": "Light Map" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 205, + 246, + 253, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 246, + 253, + 256 + ], + "spans": [ + { + "bbox": [ + 205, + 246, + 253, + 256 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 290, + 246, + 318, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 246, + 318, + 255 + ], + "spans": [ + { + "bbox": [ + 290, + 246, + 318, + 255 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 350, + 247, + 406, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 247, + 406, + 255 + ], + "spans": [ + { + "bbox": [ + 350, + 247, + 406, + 255 + ], + "type": "text", + "content": "Gemini Pro 2.0" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 436, + 247, + 470, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 436, + 247, + 470, + 257 + ], + "spans": [ + { + "bbox": [ + 436, + 247, + 470, + 257 + ], + "type": "text", + "content": "IC-Light" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 128, + 270, + 263, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 270, + 263, + 282 + ], + "spans": [ + { + "bbox": [ + 128, + 270, + 263, + 282 + ], + "type": "text", + "content": "Text-Prompt Image Re-lightning" + } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 130, + 293, + 208, + 388 + ], + "blocks": [ + { + "bbox": [ + 130, + 293, + 208, + 388 + ], + "lines": [ + { + "bbox": [ + 130, + 293, + 208, + 388 + ], + "spans": [ + { + "bbox": [ + 130, + 293, + 208, + 388 + ], + "type": "image", + "image_path": "dfe5bd0363834d7a517a03c99514ab82f92032ef90b648cf55ef38ce418a6054.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 218, + 293, + 296, + 387 + ], + "blocks": [ + { + "bbox": [ + 218, + 293, + 296, + 387 + ], + "lines": [ + { + "bbox": [ + 218, + 293, + 296, + 387 + ], + "spans": [ + { + "bbox": [ + 218, + 293, + 296, + 387 + ], + "type": "image", + "image_path": "7a43c02aec8d995221ba9a8b29fbfb292e2b680c4330dfae1451b1cbd1e28f15.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 312, + 294, + 390, + 
388 + ], + "blocks": [ + { + "bbox": [ + 312, + 294, + 390, + 388 + ], + "lines": [ + { + "bbox": [ + 312, + 294, + 390, + 388 + ], + "spans": [ + { + "bbox": [ + 312, + 294, + 390, + 388 + ], + "type": "image", + "image_path": "8aee6376931ccae47da7774dcc80f87cb4655a39088d2495592ef2b55cc6e8ce.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 403, + 294, + 482, + 387 + ], + "blocks": [ + { + "bbox": [ + 403, + 294, + 482, + 387 + ], + "lines": [ + { + "bbox": [ + 403, + 294, + 482, + 387 + ], + "spans": [ + { + "bbox": [ + 403, + 294, + 482, + 387 + ], + "type": "image", + "image_path": "6b14e73a8dcd42c4119885d683c4b57a1f2394b2ba3d7cfed0c412d829af1eb7.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 133, + 388, + 463, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 388, + 463, + 410 + ], + "spans": [ + { + "bbox": [ + 133, + 388, + 463, + 410 + ], + "type": "text", + "content": "Input Text: \"Sunlight through the blinds, near window blinds with a reasonable background.\"" + } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 132, + 411, + 205, + 496 + ], + "blocks": [ + { + "bbox": [ + 132, + 411, + 205, + 496 + ], + "lines": [ + { + "bbox": [ + 132, + 411, + 205, + 496 + ], + "spans": [ + { + "bbox": [ + 132, + 411, + 205, + 496 + ], + "type": "image", + "image_path": "9353609b07179915947a49bbe7cb97c69d989d42af40bfbeb65de7a2a7c68425.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 649, + 533, + 738 + ], + "lines": [ + { + "bbox": [ + 77, + 649, + 533, + 738 + ], + "spans": [ + { + "bbox": [ + 77, + 649, + 533, + 738 + ], + "type": "text", + "content": "Figure 35: Task: Image relighting, aiming to modify the lighting of a given image based on either a reference light map or a textual description, while preserving identity, texture, and spatial consistency. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and IC-Light [122] on two subtasks: reference-based and text-based relighting. Evaluations focus on lighting realism, directionality, shadow accuracy, and semantic preservation. Observations: IC-Light achieves the most realistic and consistent relighting across both tasks—accurately applying neon lighting from a reference image and generating sharp shadows and natural light from text prompts. Gemini 2.0 Flash preserves content well but produces softer, less directional lighting. GPT-4o offers more vivid lighting than Gemini 2.0 Flash but sometimes lacks shadow accuracy or background coherence." 
+ } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 226, + 411, + 294, + 495 + ], + "blocks": [ + { + "bbox": [ + 226, + 411, + 294, + 495 + ], + "lines": [ + { + "bbox": [ + 226, + 411, + 294, + 495 + ], + "spans": [ + { + "bbox": [ + 226, + 411, + 294, + 495 + ], + "type": "image", + "image_path": "81995073d133c3b619b48c6e202b90c68116f067eb1b41f3fb9d692f8ddaef05.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 314, + 411, + 389, + 495 + ], + "blocks": [ + { + "bbox": [ + 314, + 411, + 389, + 495 + ], + "lines": [ + { + "bbox": [ + 314, + 411, + 389, + 495 + ], + "spans": [ + { + "bbox": [ + 314, + 411, + 389, + 495 + ], + "type": "image", + "image_path": "0bcf87aef23dd6d3d1cfe80c898e5921b661e040732d6eeef59708f811da4ad7.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 408, + 411, + 482, + 496 + ], + "blocks": [ + { + "bbox": [ + 408, + 411, + 482, + 496 + ], + "lines": [ + { + "bbox": [ + 408, + 411, + 482, + 496 + ], + "spans": [ + { + "bbox": [ + 408, + 411, + 482, + 496 + ], + "type": "image", + "image_path": "1d6da19c3cbec9b2d439612a2a8553113f28a4397139ddfe38d9848ae7430cc6.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "bbox": [ + 135, + 498, + 466, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 498, + 466, + 510 + ], + "spans": [ + { + "bbox": [ + 135, + 498, + 466, + 510 + ], + "type": "text", + "content": "Input Text: \"Sunlight from the left side, beach with a reasonable background.\"" + } + ] + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 128, + 512, + 209, + 597 + ], + "blocks": [ + { + "bbox": [ + 128, + 512, + 209, + 597 + ], + "lines": [ + { + "bbox": [ + 128, + 512, + 209, + 597 + ], + "spans": [ + { + "bbox": [ + 128, + 512, + 209, + 597 + ], + "type": "image", + "image_path": "39860355d35a8389c1208ff488e0389da02ec58d4919778ab96d9ee07712b2fd.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 219, + 512, + 299, + 597 + ], + "blocks": [ + { + "bbox": [ + 219, + 512, + 299, + 597 + ], + "lines": [ + { + "bbox": [ + 219, + 512, + 299, + 597 + ], + "spans": [ + { + "bbox": [ + 219, + 512, + 299, + 597 + ], + "type": "image", + "image_path": "b5b70fc8304a56c9d8bae3967435e4b106ab643b441b2630a6c6e823636ba844.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 313, + 512, + 390, + 597 + ], + "blocks": [ + { + "bbox": [ + 313, + 512, + 390, + 597 + ], + "lines": [ + { + "bbox": [ + 313, + 512, + 390, + 597 + ], + "spans": [ + { + "bbox": [ + 313, + 512, + 390, + 597 + ], + "type": "image", + "image_path": "2f4b6a584e5347f2a2ffa4c55015f094171d881e87ced85fc577e90c71f0f4a9.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 408, + 512, + 482, + 597 + ], + "blocks": [ + { + "bbox": [ + 408, + 512, + 482, + 597 + ], + "lines": [ + { + "bbox": [ + 408, + 512, + 482, + 597 + ], + "spans": [ + { + "bbox": [ + 408, + 512, + 482, + 597 + ], + "type": "image", + "image_path": "9e208291de9a6b6fb7d4acda0172d03e668194c7eb8b35eb83dd89d00607f9ab.jpg" + } + ] + } + ], + 
"index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "bbox": [ + 138, + 598, + 466, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 598, + 466, + 610 + ], + "spans": [ + { + "bbox": [ + 138, + 598, + 466, + 610 + ], + "type": "text", + "content": "Input Text: \"Sunlight from the left side, beach with a reasonable background.\"" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 141, + 616, + 195, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 616, + 195, + 628 + ], + "spans": [ + { + "bbox": [ + 141, + 616, + 195, + 628 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 247, + 616, + 277, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 616, + 277, + 626 + ], + "spans": [ + { + "bbox": [ + 247, + 616, + 277, + 626 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 616, + 386, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 616, + 386, + 627 + ], + "spans": [ + { + "bbox": [ + 317, + 616, + 386, + 627 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 428, + 616, + 464, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 428, + 616, + 464, + 628 + ], + "spans": [ + { + "bbox": [ + 428, + 616, + 464, + 628 + ], + "type": "text", + "content": "IC-Light" + } + ] + } + ], + "index": 37 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 750 + ], + "type": "text", + "content": "46" + } + ] + } + ], + "index": 39 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 45 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 177, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 177, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 177, + 83 + ], + "type": "text", + "content": "2.2.6 Spatial Control" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 533, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 533, + 137 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 533, + 137 + ], + "type": "text", + "content": "Spatial control aims to generate visual outputs that not only reflect the content described in the prompt, but also precisely adhere to additional structural conditions (e.g., canny edge maps, depth maps, sketches, poses, and masks). This task evaluates a model's ability to faithfully align text guidance with visual constraints—an essential capability for real-world creative applications such as illustration, animation, digital content creation, and visual storytelling." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 140, + 533, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 140, + 533, + 185 + ], + "spans": [ + { + "bbox": [ + 77, + 140, + 533, + 185 + ], + "type": "text", + "content": "In this section, we examine GPT-4o's performance across five representative types of controllable conditions: canny, depth, sketch, pose, and mask. 
For each setting, we compare its outputs with those from Gemini 2.0 Flash [99] and a strong baseline method using ControlNet-based [121] diffusion backbones (FLUX.1-Dev [51], SDXL1.0 [82], SD3 Medium [27] or SD1.5 [90]). The results are illustrated in Figures 36, 37, 38, 39, 40." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 189, + 532, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 189, + 532, + 355 + ], + "spans": [ + { + "bbox": [ + 77, + 189, + 532, + 355 + ], + "type": "text", + "content": "Overall, GPT-4o achieves performance that is on par with ControlNet-based methods in many cases, especially under common or moderately complex conditions. In particular, GPT-4o is capable of handling semantically rich or contextually complex prompts, where its strong foundation model understanding can help preserve both high-level semantics and visual plausibility. This is especially evident in tasks like pose-to-image or mask-to-image, where the structural signal may be sparse or ambiguous. However, GPT-4o's strong generative prior can sometimes lead to overly detailed or hallucinated elements, which compromises structural fidelity. For instance, in canny-to-image or depth-to-image tasks that require fine-grained geometric alignment, GPT-4o may deviate from the input layout more noticeably than traditional diffusion-based methods. In contrast, ControlNet exhibits more stable and accurate control in these low-level structure-guided scenarios, making it better suited for applications where spatial accuracy is critical. That said, ControlNet may struggle in more complex or open-ended cases, such as mask-to-image scenes involving multiple objects or interactions (e.g., aquariums with visitors and fish). In these scenarios, GPT-4o's strong cross-modal understanding partially compensates for its weaker control, offering plausible but not fully precise outputs. By comparison, Gemini 2.0 Flash lacks robust controllable generation capabilities across all evaluated control types. Its outputs often fail to match either the control condition or the textual prompt, reflecting limited capacity in multimodal alignment and structural grounding." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 358, + 533, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 358, + 533, + 404 + ], + "spans": [ + { + "bbox": [ + 77, + 358, + 533, + 404 + ], + "type": "text", + "content": "In summary, GPT-4o demonstrates performance comparable to SOTA methods in most cases, excelling in tasks that require rich semantic understanding and contextual complexity while maintaining a balance between high-level semantics and visual plausibility. Although it may exhibit structural deviations in tasks requiring precise geometric alignment, its strong generative prior gives it an advantage in handling complex or open-ended scenarios." 
+ } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "47" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 46 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 119, + 180, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 119, + 180, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 119, + 180, + 133 + ], + "type": "text", + "content": "Canny-to-Image" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 184, + 148, + 435, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 148, + 435, + 163 + ], + "spans": [ + { + "bbox": [ + 184, + 148, + 435, + 163 + ], + "type": "text", + "content": "Evaluation: Controllability and text consistency." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 170, + 145, + 182, + 160 + ], + "blocks": [ + { + "bbox": [ + 170, + 145, + 182, + 160 + ], + "lines": [ + { + "bbox": [ + 170, + 145, + 182, + 160 + ], + "spans": [ + { + "bbox": [ + 170, + 145, + 182, + 160 + ], + "type": "image", + "image_path": "678977bfcc7731282c3cf0706d2a04a055df0dfc7950b35c45d46e6ffafa9252.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 92, + 170, + 194, + 273 + ], + "blocks": [ + { + "bbox": [ + 92, + 170, + 194, + 273 + ], + "lines": [ + { + "bbox": [ + 92, + 170, + 194, + 273 + ], + "spans": [ + { + "bbox": [ + 92, + 170, + 194, + 273 + ], + "type": "image", + "image_path": "34eb588af3ad0d16e975cf94c0165c9ea9cdbab78610d9c4fea8f37457c30f62.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 198, + 170, + 301, + 272 + ], + "blocks": [ + { + "bbox": [ + 198, + 170, + 301, + 272 + ], + "lines": [ + { + "bbox": [ + 198, + 170, + 301, + 272 + ], + "spans": [ + { + "bbox": [ + 198, + 170, + 301, + 272 + ], + "type": "image", + "image_path": "09b169752ee560ce282e0e189791bd22c980b5f6d260fd06d327d0bb93aa3fdc.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 304, + 171, + 404, + 273 + ], + "blocks": [ + { + "bbox": [ + 304, + 171, + 404, + 273 + ], + "lines": [ + { + "bbox": [ + 304, + 171, + 404, + 273 + ], + "spans": [ + { + "bbox": [ + 304, + 171, + 404, + 273 + ], + "type": "image", + "image_path": "7ba7f124d58c88bee0135fc235b0aebd8b6426909a755ac1f40c313c7c9cad81.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 408, + 171, + 512, + 273 + ], + "blocks": [ + { + "bbox": [ + 408, + 171, + 512, + 273 + ], + "lines": [ + { + "bbox": [ + 408, + 171, + 512, + 273 + ], + "spans": [ + { + "bbox": [ + 408, + 171, + 512, + 273 + ], + "type": "image", + "image_path": "e2e8d2f4bc567a25d1d7f56d98489dc9897b06c00ba90d8a58c28ba70a1e2151.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 96, + 276, + 488, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 276, + 488, + 301 + ], + "spans": [ + { + "bbox": [ + 96, + 276, + 488, + 301 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt 
and canny condition below to generate a controllable image. The prompt is: a cigarette with purple tobacco.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 92, + 306, + 194, + 408 + ], + "blocks": [ + { + "bbox": [ + 92, + 306, + 194, + 408 + ], + "lines": [ + { + "bbox": [ + 92, + 306, + 194, + 408 + ], + "spans": [ + { + "bbox": [ + 92, + 306, + 194, + 408 + ], + "type": "image", + "image_path": "509dd28767c58df8dc35fefb0ee74154d0b9a3aadcb516526f252eee3fbd72ef.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 198, + 306, + 301, + 408 + ], + "blocks": [ + { + "bbox": [ + 198, + 306, + 301, + 408 + ], + "lines": [ + { + "bbox": [ + 198, + 306, + 301, + 408 + ], + "spans": [ + { + "bbox": [ + 198, + 306, + 301, + 408 + ], + "type": "image", + "image_path": "01c729abfdd7ba1065b518b5f8fac38df17a322d899f16a92e21606ffe75be31.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 304, + 306, + 406, + 408 + ], + "blocks": [ + { + "bbox": [ + 304, + 306, + 406, + 408 + ], + "lines": [ + { + "bbox": [ + 304, + 306, + 406, + 408 + ], + "spans": [ + { + "bbox": [ + 304, + 306, + 406, + 408 + ], + "type": "image", + "image_path": "572feead6e536b7524eb2f723ff12841c3e537418651f45988dbcb4312766cf2.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 410, + 306, + 512, + 408 + ], + "blocks": [ + { + "bbox": [ + 410, + 306, + 512, + 408 + ], + "lines": [ + { + "bbox": [ + 410, + 306, + 512, + 408 + ], + "spans": [ + { + "bbox": [ + 410, + 306, + 512, + 408 + ], + "type": "image", + "image_path": "9b39db21bfca83d6660c37f7265a41bd9d7830647b6fcbdaafa3cc14372c9c6e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 96, + 413, + 488, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 413, + 488, + 438 + ], + "spans": [ + { + "bbox": [ + 96, + 413, + 488, + 438 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and canny condition below to generate a controllable image. The prompt is: a traffic sign with red cross written on it.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 92, + 444, + 194, + 546 + ], + "blocks": [ + { + "bbox": [ + 92, + 444, + 194, + 546 + ], + "lines": [ + { + "bbox": [ + 92, + 444, + 194, + 546 + ], + "spans": [ + { + "bbox": [ + 92, + 444, + 194, + 546 + ], + "type": "image", + "image_path": "582236643d6a53099264228610a5ef869dd1b77a4e837887915e57f35aec63bc.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 612, + 533, + 680 + ], + "lines": [ + { + "bbox": [ + 77, + 612, + 533, + 680 + ], + "spans": [ + { + "bbox": [ + 77, + 612, + 533, + 680 + ], + "type": "text", + "content": "Figure 36: Task: Canny-to-Image generation. The goal is to generate prompt-aligned images guided by canny maps. Setup: Each row shows an input canny map and a text prompt, with outputs from GPT-4o, Gemini 2.0 Flash [99], and FLUX.1-Dev w. ControlNet [51]. Observations: GPT-4o performs worse than FLUX.1-Dev [51] in structural fidelity, often introducing additional visual details that deviate from the input edge map. However, it produces more semantically aligned and aesthetically pleasing results overall. 
Compared to Gemini 2.0 Flash, GPT-4o significantly outperforms in both structure preservation and prompt consistency." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 198, + 444, + 301, + 545 + ], + "blocks": [ + { + "bbox": [ + 198, + 444, + 301, + 545 + ], + "lines": [ + { + "bbox": [ + 198, + 444, + 301, + 545 + ], + "spans": [ + { + "bbox": [ + 198, + 444, + 301, + 545 + ], + "type": "image", + "image_path": "2bc479794ab27b52ddd075f3f33d1e15a260aae2e169b634378ccdde014ad491.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 304, + 444, + 405, + 545 + ], + "blocks": [ + { + "bbox": [ + 304, + 444, + 405, + 545 + ], + "lines": [ + { + "bbox": [ + 304, + 444, + 405, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 444, + 405, + 545 + ], + "type": "image", + "image_path": "d328a02168204bffd240fc039c323fc7a9152c7db8d314980163eda7d00ecf5d.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 409, + 444, + 512, + 545 + ], + "blocks": [ + { + "bbox": [ + 409, + 444, + 512, + 545 + ], + "lines": [ + { + "bbox": [ + 409, + 444, + 512, + 545 + ], + "spans": [ + { + "bbox": [ + 409, + 444, + 512, + 545 + ], + "type": "image", + "image_path": "65ad8232e82ff1d205196c6cd60b284ad9f6d0f84803d62c20199cec22907447.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 96, + 548, + 502, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 548, + 502, + 573 + ], + "spans": [ + { + "bbox": [ + 96, + 548, + 502, + 573 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and canny condition below to generate a controllable image. The prompt is: oil painting of geese flying in a v formation over a pond at sunset.\"" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 579, + 175, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 579, + 175, + 592 + ], + "spans": [ + { + "bbox": [ + 115, + 579, + 175, + 592 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 233, + 580, + 267, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 580, + 267, + 590 + ], + "spans": [ + { + "bbox": [ + 233, + 580, + 267, + 590 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 580, + 392, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 580, + 392, + 590 + ], + "spans": [ + { + "bbox": [ + 316, + 580, + 392, + 590 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 425, + 575, + 490, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 575, + 490, + 597 + ], + "spans": [ + { + "bbox": [ + 425, + 575, + 490, + 597 + ], + "type": "text", + "content": "FLUX.1-Dev w.
ControlNet" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "48" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 47 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 124, + 181, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 124, + 181, + 137 + ], + "spans": [ + { + "bbox": [ + 104, + 124, + 181, + 137 + ], + "type": "text", + "content": "Depth-to-Image" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 184, + 149, + 435, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 149, + 435, + 163 + ], + "spans": [ + { + "bbox": [ + 184, + 149, + 435, + 163 + ], + "type": "text", + "content": "Evaluation: Controllability and text consistency." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 91, + 171, + 194, + 274 + ], + "blocks": [ + { + "bbox": [ + 91, + 171, + 194, + 274 + ], + "lines": [ + { + "bbox": [ + 91, + 171, + 194, + 274 + ], + "spans": [ + { + "bbox": [ + 91, + 171, + 194, + 274 + ], + "type": "image", + "image_path": "74777175c863d9f366b45a7843d8eb9164d1d6ccee90d1c36c565abd5a6cd450.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 96, + 276, + 489, + 301 + ], + "lines": [ + { + "bbox": [ + 96, + 276, + 489, + 301 + ], + "spans": [ + { + "bbox": [ + 96, + 276, + 489, + 301 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and depth condition below to generate a controllable image. The prompt is: a wooden bridge that has fallen down in the grass.\"" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 197, + 171, + 300, + 274 + ], + "blocks": [ + { + "bbox": [ + 197, + 171, + 300, + 274 + ], + "lines": [ + { + "bbox": [ + 197, + 171, + 300, + 274 + ], + "spans": [ + { + "bbox": [ + 197, + 171, + 300, + 274 + ], + "type": "image", + "image_path": "4269989ea478ceccbce68d7fb7a7ad5b3bda0673dc3e29b9f2ccd7951d3cd19a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 303, + 171, + 406, + 274 + ], + "blocks": [ + { + "bbox": [ + 303, + 171, + 406, + 274 + ], + "lines": [ + { + "bbox": [ + 303, + 171, + 406, + 274 + ], + "spans": [ + { + "bbox": [ + 303, + 171, + 406, + 274 + ], + "type": "image", + "image_path": "c4a439744d0603cca113101ddccdd39ff6f86154f75db50db717e2e21c43d890.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 410, + 171, + 512, + 274 + ], + "blocks": [ + { + "bbox": [ + 410, + 171, + 512, + 274 + ], + "lines": [ + { + "bbox": [ + 410, + 171, + 512, + 274 + ], + "spans": [ + { + "bbox": [ + 410, + 171, + 512, + 274 + ], + "type": "image", + "image_path": "c34d4877edd4f942f60dcc2c4527c7ca7b8db7854520d882872d6a05a6e0c924.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 92, + 304, + 194, + 407 + ], + "blocks": [ + { + "bbox": [ + 92, + 304, + 194, + 407 + ], + "lines": [ + { + "bbox": [ + 92, + 304, + 194, + 407 + ], + "spans": [ + { + "bbox": [ + 92, + 304, + 194, + 407 + ], + "type": "image", 
+ "image_path": "18cac760bb10b2890aca59d644e60712db22b56fe52a9734a701f01dc680756b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 411, + 489, + 436 + ], + "lines": [ + { + "bbox": [ + 95, + 411, + 489, + 436 + ], + "spans": [ + { + "bbox": [ + 95, + 411, + 489, + 436 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and depth condition below to generate a controllable image. The prompt is: a 3d image of a stone building with plants and rocks.\"" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 197, + 304, + 301, + 407 + ], + "blocks": [ + { + "bbox": [ + 197, + 304, + 301, + 407 + ], + "lines": [ + { + "bbox": [ + 197, + 304, + 301, + 407 + ], + "spans": [ + { + "bbox": [ + 197, + 304, + 301, + 407 + ], + "type": "image", + "image_path": "d96c269179392a477b20cac488bf504f948497d76d9712d151212c0dc9a0c84f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 304, + 304, + 406, + 408 + ], + "blocks": [ + { + "bbox": [ + 304, + 304, + 406, + 408 + ], + "lines": [ + { + "bbox": [ + 304, + 304, + 406, + 408 + ], + "spans": [ + { + "bbox": [ + 304, + 304, + 406, + 408 + ], + "type": "image", + "image_path": "033fe2dbc7534a41ab6e0d52eae6d42fbd80b6b350bb3fbca8b041c43fc41b40.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 410, + 304, + 513, + 408 + ], + "blocks": [ + { + "bbox": [ + 410, + 304, + 513, + 408 + ], + "lines": [ + { + "bbox": [ + 410, + 304, + 513, + 408 + ], + "spans": [ + { + "bbox": [ + 410, + 304, + 513, + 408 + ], + "type": "image", + "image_path": "46b577cbc8feb431b513caaf0fa800f1004a3c0cb4738f315c72a97f38bb97c4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 92, + 438, + 194, + 508 + ], + "blocks": [ + { + "bbox": [ + 92, + 438, + 194, + 508 + ], + "lines": [ + { + "bbox": [ + 92, + 438, + 194, + 508 + ], + "spans": [ + { + "bbox": [ + 92, + 438, + 194, + 508 + ], + "type": "image", + "image_path": "0d51b383aa6e1f47aa6ac0e33d09504e867f5f452a0b912bdf9c993b4e2622b3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 96, + 510, + 489, + 535 + ], + "lines": [ + { + "bbox": [ + 96, + 510, + 489, + 535 + ], + "spans": [ + { + "bbox": [ + 96, + 510, + 489, + 535 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and depth condition below to generate a controllable image. 
The prompt is: a red pillow on a chair.\"" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 197, + 438, + 300, + 508 + ], + "blocks": [ + { + "bbox": [ + 197, + 438, + 300, + 508 + ], + "lines": [ + { + "bbox": [ + 197, + 438, + 300, + 508 + ], + "spans": [ + { + "bbox": [ + 197, + 438, + 300, + 508 + ], + "type": "image", + "image_path": "46017199672cdc547f9700dfe377960c656d99e3ef6cc597d38c0dbafddd087b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 577, + 533, + 677 + ], + "lines": [ + { + "bbox": [ + 77, + 577, + 533, + 677 + ], + "spans": [ + { + "bbox": [ + 77, + 577, + 533, + 677 + ], + "type": "text", + "content": "Figure 37: Task: Depth-to-image generation, aiming to synthesize controllable and visually coherent images based on a text prompt and a given depth map. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and FLUX.1-Dev w. ControlNet [51], focusing on controllability, text-prompt alignment, and the visual quality of generated scenes. Observations: GPT-4o generates visually appealing and stylistically consistent images that align reasonably with text and depth cues—such as the bridge scene and stone ruins with rich lighting and artistic tone. However, its controllability is weaker than FLUX.1-Dev w. ControlNet [51], which shows more precise depth alignment and object placement, as seen in the accurate layout of the bridge and red pillow. GPT-4o leans toward stylized coherence, while FLUX emphasizes photorealism with sharper spatial fidelity. Gemini 2.0 Flash lags behind both, often showing depth misalignment, shape distortion, and weaker semantic grounding." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 303, + 438, + 406, + 508 + ], + "blocks": [ + { + "bbox": [ + 303, + 438, + 406, + 508 + ], + "lines": [ + { + "bbox": [ + 303, + 438, + 406, + 508 + ], + "spans": [ + { + "bbox": [ + 303, + 438, + 406, + 508 + ], + "type": "image", + "image_path": "90148daf477655ba5438aee9571917bb7d337f466c861465128cb7f0a6616246.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 410, + 438, + 512, + 508 + ], + "blocks": [ + { + "bbox": [ + 410, + 438, + 512, + 508 + ], + "lines": [ + { + "bbox": [ + 410, + 438, + 512, + 508 + ], + "spans": [ + { + "bbox": [ + 410, + 438, + 512, + 508 + ], + "type": "image", + "image_path": "c2e5c471cced0e2bc19cbb418990047f6502461f9ab068015212292ab802d155.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 542, + 174, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 542, + 174, + 555 + ], + "spans": [ + { + "bbox": [ + 114, + 542, + 174, + 555 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 231, + 542, + 265, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 542, + 265, + 553 + ], + "spans": [ + { + "bbox": [ + 231, + 542, + 265, + 553 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 542, + 392, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 542, + 392, + 553 + ], + "spans": [ + { + "bbox": [ + 315, + 542, + 392, + 553 + ], + "type": "text", + "content": "Gemini
2.0 Flash" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 424, + 534, + 490, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 424, + 534, + 490, + 558 + ], + "spans": [ + { + "bbox": [ + 424, + 534, + 490, + 558 + ], + "type": "text", + "content": "FLUX.1-Dev w. ControlNet" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "49" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 48 + }, + { + "para_blocks": [ + { + "bbox": [ + 100, + 99, + 183, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 99, + 183, + 111 + ], + "spans": [ + { + "bbox": [ + 100, + 99, + 183, + 111 + ], + "type": "text", + "content": "Sketch-to-Image" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 169, + 125, + 182, + 140 + ], + "blocks": [ + { + "bbox": [ + 169, + 125, + 182, + 140 + ], + "lines": [ + { + "bbox": [ + 169, + 125, + 182, + 140 + ], + "spans": [ + { + "bbox": [ + 169, + 125, + 182, + 140 + ], + "type": "image", + "image_path": "ca0e9e3e4ad44d1a4998e2e53190039767bd351173e262b703bb663bf16e4dd2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 185, + 129, + 435, + 143 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 129, + 435, + 143 + ], + "spans": [ + { + "bbox": [ + 185, + 129, + 435, + 143 + ], + "type": "text", + "content": "Evaluation: Controllability and text consistency." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 91, + 150, + 194, + 253 + ], + "blocks": [ + { + "bbox": [ + 91, + 150, + 194, + 253 + ], + "lines": [ + { + "bbox": [ + 91, + 150, + 194, + 253 + ], + "spans": [ + { + "bbox": [ + 91, + 150, + 194, + 253 + ], + "type": "image", + "image_path": "b553f7bab31ff9efef4880bc6749cc3b0972fb954926e7cddb979fe4fa86b1e4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 197, + 150, + 301, + 253 + ], + "blocks": [ + { + "bbox": [ + 197, + 150, + 301, + 253 + ], + "lines": [ + { + "bbox": [ + 197, + 150, + 301, + 253 + ], + "spans": [ + { + "bbox": [ + 197, + 150, + 301, + 253 + ], + "type": "image", + "image_path": "e37560da746ee64c9f4dbda99f18792081354ee1290d5fb107e6b17833cab89b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 303, + 150, + 406, + 253 + ], + "blocks": [ + { + "bbox": [ + 303, + 150, + 406, + 253 + ], + "lines": [ + { + "bbox": [ + 303, + 150, + 406, + 253 + ], + "spans": [ + { + "bbox": [ + 303, + 150, + 406, + 253 + ], + "type": "image", + "image_path": "774a2f8158ca42b89d06121387988ac2fa21eab2708a59d2a40c47aeeefeea74.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 410, + 150, + 512, + 253 + ], + "blocks": [ + { + "bbox": [ + 410, + 150, + 512, + 253 + ], + "lines": [ + { + "bbox": [ + 410, + 150, + 512, + 253 + ], + "spans": [ + { + "bbox": [ + 410, + 150, + 512, + 253 + ], + "type": "image", + "image_path": "9b25e051c800568d87dbf568b975a694ac1831be64dac4fe52e936e4cbe78e4f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 96, + 256, + 493, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 256, + 493, + 281 + ], + "spans": [ + { + "bbox": [ + 96, + 256, + 493, + 281 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and sketch condition below to generate a controllable image. The prompt is: A small giraffe eating grass.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 89, + 285, + 194, + 387 + ], + "blocks": [ + { + "bbox": [ + 89, + 285, + 194, + 387 + ], + "lines": [ + { + "bbox": [ + 89, + 285, + 194, + 387 + ], + "spans": [ + { + "bbox": [ + 89, + 285, + 194, + 387 + ], + "type": "image", + "image_path": "76b3ffacb274a3c770ba9db5c2fcea3609020e8b054535fa7f050994e7337844.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 587, + 533, + 698 + ], + "lines": [ + { + "bbox": [ + 77, + 587, + 533, + 698 + ], + "spans": [ + { + "bbox": [ + 77, + 587, + 533, + 698 + ], + "type": "text", + "content": "Figure 38: Task: Sketch-to-image generation, which requires translating rough line drawings into realistic and semantically accurate images guided by text prompts. Setup: We evaluate GPT-4o against Gemini 2.0 Flash [99] and SDXL1.0 w. ControlNet [82], focusing on how well each model respects the provided sketch while reflecting the described content. Observations: GPT-4o excels at generating lifelike scenes that match the prompt, often delivering visually pleasing and contextually grounded outputs—like the natural posture and setting of the giraffe or the dynamic movement in the parachute example. However, it tends to soften or reinterpret sketch lines, leading to slight mismatches in fine structure. In contrast, SDXL1.0 w. ControlNet [82] offers stronger adherence to the input sketch, capturing geometric details more accurately (e.g., fan blades and figure outlines), albeit with slightly more synthetic textures. Gemini 2.0 Flash shows limited understanding of both sketch and prompt, often producing less realistic or structurally off-target images." 
+ } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 196, + 285, + 300, + 387 + ], + "blocks": [ + { + "bbox": [ + 196, + 285, + 300, + 387 + ], + "lines": [ + { + "bbox": [ + 196, + 285, + 300, + 387 + ], + "spans": [ + { + "bbox": [ + 196, + 285, + 300, + 387 + ], + "type": "image", + "image_path": "5bbe5e94282bd4bc533b7d94b749decb59742f5aee7bcb68887dcce921c8b324.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 303, + 285, + 406, + 387 + ], + "blocks": [ + { + "bbox": [ + 303, + 285, + 406, + 387 + ], + "lines": [ + { + "bbox": [ + 303, + 285, + 406, + 387 + ], + "spans": [ + { + "bbox": [ + 303, + 285, + 406, + 387 + ], + "type": "image", + "image_path": "c3fac1a22608550f62bd5e74998a43c8ae3cd590d4dfa1647701dea7d93c7ce8.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 410, + 285, + 512, + 387 + ], + "blocks": [ + { + "bbox": [ + 410, + 285, + 512, + 387 + ], + "lines": [ + { + "bbox": [ + 410, + 285, + 512, + 387 + ], + "spans": [ + { + "bbox": [ + 410, + 285, + 512, + 387 + ], + "type": "image", + "image_path": "b2861bea0423cee774150f0887bdb41c652b436f4eada06228dfb7f7f53078b3.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 95, + 391, + 493, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 391, + 493, + 415 + ], + "spans": [ + { + "bbox": [ + 95, + 391, + 493, + 415 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and sketch condition below to generate a controllable image. 
The prompt is: A red metal electric fan.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 89, + 418, + 194, + 520 + ], + "blocks": [ + { + "bbox": [ + 89, + 418, + 194, + 520 + ], + "lines": [ + { + "bbox": [ + 89, + 418, + 194, + 520 + ], + "spans": [ + { + "bbox": [ + 89, + 418, + 194, + 520 + ], + "type": "image", + "image_path": "6e8cda0469a84878dd4813f102a0c8e09c7e5e7f7b3d81edb91ab19c428a22a1.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 196, + 418, + 300, + 520 + ], + "blocks": [ + { + "bbox": [ + 196, + 418, + 300, + 520 + ], + "lines": [ + { + "bbox": [ + 196, + 418, + 300, + 520 + ], + "spans": [ + { + "bbox": [ + 196, + 418, + 300, + 520 + ], + "type": "image", + "image_path": "b143d496a975b31c007201551ab2858b5f989539fd28932d11bf8433f8fa182f.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 303, + 418, + 406, + 520 + ], + "blocks": [ + { + "bbox": [ + 303, + 418, + 406, + 520 + ], + "lines": [ + { + "bbox": [ + 303, + 418, + 406, + 520 + ], + "spans": [ + { + "bbox": [ + 303, + 418, + 406, + 520 + ], + "type": "image", + "image_path": "ecbf422861a6f3acb1bf2fa0c11269caa5fa86090bdbdd18230b1dcaaeb183d8.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 410, + 418, + 512, + 520 + ], + "blocks": [ + { + "bbox": [ + 410, + 418, + 512, + 520 + ], + "lines": [ + { + "bbox": [ + 410, + 418, + 512, + 520 + ], + "spans": [ + { + "bbox": [ + 410, + 418, + 512, + 520 + ], + "type": "image", + "image_path": "e7467303dc6f9aead4b04680ab549cd4c9987da28a7b8e07f93928c3cc115828.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 95, + 522, + 493, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 522, + 493, + 548 + ], + "spans": [ + { + "bbox": [ + 95, + 522, + 493, + 548 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and sketch condition below to generate a controllable image. 
The prompt is: a man holding on to the strings of a flying parachute.\"" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 552, + 174, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 552, + 174, + 565 + ], + "spans": [ + { + "bbox": [ + 114, + 552, + 174, + 565 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 231, + 552, + 265, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 552, + 265, + 563 + ], + "spans": [ + { + "bbox": [ + 231, + 552, + 265, + 563 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 553, + 392, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 553, + 392, + 564 + ], + "spans": [ + { + "bbox": [ + 317, + 553, + 392, + 564 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 441, + 549, + 482, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 441, + 549, + 482, + 559 + ], + "spans": [ + { + "bbox": [ + 441, + 549, + 482, + 559 + ], + "type": "text", + "content": "SDXL1.0" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 428, + 560, + 493, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 428, + 560, + 493, + 571 + ], + "spans": [ + { + "bbox": [ + 428, + 560, + 493, + 571 + ], + "type": "text", + "content": "w. ControlNet" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "50" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 49 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 170, + 118, + 184, + 134 + ], + "blocks": [ + { + "bbox": [ + 170, + 118, + 184, + 134 + ], + "lines": [ + { + "bbox": [ + 170, + 118, + 184, + 134 + ], + "spans": [ + { + "bbox": [ + 170, + 118, + 184, + 134 + ], + "type": "image", + "image_path": "4f2a82196d91323ac6987c501fe192b87a37b8c7bd49cfefc255b54f649c96f1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 185, + 122, + 435, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 122, + 435, + 137 + ], + "spans": [ + { + "bbox": [ + 185, + 122, + 435, + 137 + ], + "type": "text", + "content": "Evaluation: Controllability and text consistency." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 92, + 140, + 193, + 220 + ], + "blocks": [ + { + "bbox": [ + 92, + 140, + 193, + 220 + ], + "lines": [ + { + "bbox": [ + 92, + 140, + 193, + 220 + ], + "spans": [ + { + "bbox": [ + 92, + 140, + 193, + 220 + ], + "type": "image", + "image_path": "ef305485daf2aa15d9580d3579fba6ec68a2f97825b971e73b7dda80fc7b27d3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 222, + 502, + 258 + ], + "lines": [ + { + "bbox": [ + 95, + 222, + 502, + 258 + ], + "spans": [ + { + "bbox": [ + 95, + 222, + 502, + 258 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and pose condition below to generate a controllable image.
The prompt is: Quarterback in a blue and white jersey with number 14, preparing to throw a football during a game.\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 196, + 140, + 300, + 220 + ], + "blocks": [ + { + "bbox": [ + 196, + 140, + 300, + 220 + ], + "lines": [ + { + "bbox": [ + 196, + 140, + 300, + 220 + ], + "spans": [ + { + "bbox": [ + 196, + 140, + 300, + 220 + ], + "type": "image", + "image_path": "3015117dffd29de40979d63938f43194ed356ec24b893f0eca89a8e4d2397504.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 304, + 140, + 407, + 221 + ], + "blocks": [ + { + "bbox": [ + 304, + 140, + 407, + 221 + ], + "lines": [ + { + "bbox": [ + 304, + 140, + 407, + 221 + ], + "spans": [ + { + "bbox": [ + 304, + 140, + 407, + 221 + ], + "type": "image", + "image_path": "49309cffd79e60cb0270f525f65a35ec7a1dda136d91a8690b5a4235fa3ad428.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 410, + 140, + 512, + 221 + ], + "blocks": [ + { + "bbox": [ + 410, + 140, + 512, + 221 + ], + "lines": [ + { + "bbox": [ + 410, + 140, + 512, + 221 + ], + "spans": [ + { + "bbox": [ + 410, + 140, + 512, + 221 + ], + "type": "image", + "image_path": "4f0a2ccea09917b8073951d104a30402dee27f0a79f45727ae62f4aca6252dac.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 92, + 262, + 194, + 377 + ], + "blocks": [ + { + "bbox": [ + 92, + 262, + 194, + 377 + ], + "lines": [ + { + "bbox": [ + 92, + 262, + 194, + 377 + ], + "spans": [ + { + "bbox": [ + 92, + 262, + 194, + 377 + ], + "type": "image", + "image_path": "1529d55096562ea1287a6e80927c823cf3ef85f6ff2fc349534df8add404dfe4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 380, + 514, + 417 + ], + "lines": [ + { + "bbox": [ + 95, + 380, + 514, + 417 + ], + "spans": [ + { + "bbox": [ + 95, + 380, + 514, + 417 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and pose condition below to generate a controllable image.
The prompt is : A young woman with long brown hair, wearing a blue strapless dress and a black necklace with a butterfly pendant, poses against a beige background.\"" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 196, + 262, + 301, + 377 + ], + "blocks": [ + { + "bbox": [ + 196, + 262, + 301, + 377 + ], + "lines": [ + { + "bbox": [ + 196, + 262, + 301, + 377 + ], + "spans": [ + { + "bbox": [ + 196, + 262, + 301, + 377 + ], + "type": "image", + "image_path": "cbc8f5122209a93c7fcd5c01f787a5328fd12da73ba30b8fe0fa36d8089f19ee.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 304, + 261, + 406, + 377 + ], + "blocks": [ + { + "bbox": [ + 304, + 261, + 406, + 377 + ], + "lines": [ + { + "bbox": [ + 304, + 261, + 406, + 377 + ], + "spans": [ + { + "bbox": [ + 304, + 261, + 406, + 377 + ], + "type": "image", + "image_path": "b6efdeb3733117ab0264a27bea6c1da70b95aac300ccfe78cc7e7eaf29cc41bc.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 410, + 261, + 512, + 377 + ], + "blocks": [ + { + "bbox": [ + 410, + 261, + 512, + 377 + ], + "lines": [ + { + "bbox": [ + 410, + 261, + 512, + 377 + ], + "spans": [ + { + "bbox": [ + 410, + 261, + 512, + 377 + ], + "type": "image", + "image_path": "386ab41549ccbbdcb6953f2cfb0c22f29b59c77cb08a11e2d82aaf042ab0a3a4.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 92, + 421, + 193, + 543 + ], + "blocks": [ + { + "bbox": [ + 92, + 421, + 193, + 543 + ], + "lines": [ + { + "bbox": [ + 92, + 421, + 193, + 543 + ], + "spans": [ + { + "bbox": [ + 92, + 421, + 193, + 543 + ], + "type": "image", + "image_path": "12d972a127e37a6219e4bd892eadd138cfbb736fb6916e3a8ea8c8cccd698774.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 545, + 483, + 571 + ], + "lines": [ + { + "bbox": [ + 95, + 545, + 483, + 571 + ], + "spans": [ + { + "bbox": [ + 95, + 545, + 483, + 571 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. The prompt is : A woman is performing a pull-up exercise on a gym rack.\"" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 196, + 421, + 299, + 543 + ], + "blocks": [ + { + "bbox": [ + 196, + 421, + 299, + 543 + ], + "lines": [ + { + "bbox": [ + 196, + 421, + 299, + 543 + ], + "spans": [ + { + "bbox": [ + 196, + 421, + 299, + 543 + ], + "type": "image", + "image_path": "eff06e3cb3442fb9e89cec40098e50870a9ded39e1f69753076da696882aa86d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 606, + 533, + 706 + ], + "lines": [ + { + "bbox": [ + 77, + 606, + 533, + 706 + ], + "spans": [ + { + "bbox": [ + 77, + 606, + 533, + 706 + ], + "type": "text", + "content": "Figure 39: Task: Pose-to-image generation, aiming to synthesize realistic images that reflect both the human pose and descriptive prompt. Setup: We benchmark GPT-4o against Gemini 2.0 Flash [99] and SD3 Medium w. ControlNet [27], evaluating their ability to follow pose conditions while generating semantically accurate and coherent images. 
Observations: GPT-4o performs well in complex scenes—such as the football example—where it effectively integrates pose, clothing, and background with strong realism, contextual and pose accuracy. In simpler cases like the pull-up exercise, it shows occasional pose drift, especially in limbs. SD3 Medium w. ControlNet [27] offers better pose fidelity overall, though its visual quality can be inconsistent. Gemini 2.0 Flash underperforms in both structure and coherence, often generating anatomically incorrect or visually weak results. Overall, GPT-4o balances text understanding and generation quality, especially in detailed prompts." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 304, + 421, + 407, + 543 + ], + "blocks": [ + { + "bbox": [ + 304, + 421, + 407, + 543 + ], + "lines": [ + { + "bbox": [ + 304, + 421, + 407, + 543 + ], + "spans": [ + { + "bbox": [ + 304, + 421, + 407, + 543 + ], + "type": "image", + "image_path": "c9e2f0970eb2e881c87f2518165ec8fafd6765bc14d8e17f59829268ad6411c8.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 410, + 421, + 512, + 543 + ], + "blocks": [ + { + "bbox": [ + 410, + 421, + 512, + 543 + ], + "lines": [ + { + "bbox": [ + 410, + 421, + 512, + 543 + ], + "spans": [ + { + "bbox": [ + 410, + 421, + 512, + 543 + ], + "type": "image", + "image_path": "b7302a93838bd0514ce6b4544837c9f5854b7f09d9a31b036a582c5735883900.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 578, + 174, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 578, + 174, + 591 + ], + "spans": [ + { + "bbox": [ + 114, + 578, + 174, + 591 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 230, + 578, + 265, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 578, + 265, + 589 + ], + "spans": [ + { + "bbox": [ + 230, + 578, + 265, + 589 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 577, + 392, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 577, + 392, + 587 + ], + "spans": [ + { + "bbox": [ + 316, + 577, + 392, + 587 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 432, + 570, + 496, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 432, + 570, + 496, + 594 + ], + "spans": [ + { + "bbox": [ + 432, + 570, + 496, + 594 + ], + "type": "text", + "content": "SD3 Medium w.
ControlNet" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 107, + 93, + 178, + 106 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 93, + 178, + 106 + ], + "spans": [ + { + "bbox": [ + 107, + 93, + 178, + 106 + ], + "type": "text", + "content": "Pose-to-Image" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "51" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 50 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 84, + 179, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 84, + 179, + 98 + ], + "spans": [ + { + "bbox": [ + 104, + 84, + 179, + 98 + ], + "type": "text", + "content": "Mask-to-Image" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 170, + 106, + 184, + 121 + ], + "blocks": [ + { + "bbox": [ + 170, + 106, + 184, + 121 + ], + "lines": [ + { + "bbox": [ + 170, + 106, + 184, + 121 + ], + "spans": [ + { + "bbox": [ + 170, + 106, + 184, + 121 + ], + "type": "image", + "image_path": "3395f7e5893f7029588b9d5b16f3549b7c5baa04869e047e731f2d72c81f6981.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 184, + 110, + 436, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 110, + 436, + 124 + ], + "spans": [ + { + "bbox": [ + 184, + 110, + 436, + 124 + ], + "type": "text", + "content": "Evaluation: Controllability and text consistency." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 91, + 131, + 194, + 233 + ], + "blocks": [ + { + "bbox": [ + 91, + 131, + 194, + 233 + ], + "lines": [ + { + "bbox": [ + 91, + 131, + 194, + 233 + ], + "spans": [ + { + "bbox": [ + 91, + 131, + 194, + 233 + ], + "type": "image", + "image_path": "3b637fcfa8060bbf723d957b7f4a1aa49a5f9d87c744993d0514d9873e23989e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 196, + 132, + 300, + 234 + ], + "blocks": [ + { + "bbox": [ + 196, + 132, + 300, + 234 + ], + "lines": [ + { + "bbox": [ + 196, + 132, + 300, + 234 + ], + "spans": [ + { + "bbox": [ + 196, + 132, + 300, + 234 + ], + "type": "image", + "image_path": "8175a842bf2f363a8a70eb3f0550f36e400f3fdd4bd3f80909fb7879a8e130c1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 303, + 132, + 406, + 234 + ], + "blocks": [ + { + "bbox": [ + 303, + 132, + 406, + 234 + ], + "lines": [ + { + "bbox": [ + 303, + 132, + 406, + 234 + ], + "spans": [ + { + "bbox": [ + 303, + 132, + 406, + 234 + ], + "type": "image", + "image_path": "6e8d8fa6a7fc992e352dc0100c93543a6693728f8a6fe64aac51f54c477a6736.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 410, + 132, + 512, + 234 + ], + "blocks": [ + { + "bbox": [ + 410, + 132, + 512, + 234 + ], + "lines": [ + { + "bbox": [ + 410, + 132, + 512, + 234 + ], + "spans": [ + { + "bbox": [ + 410, + 132, + 512, + 234 + ], + "type": "image", + "image_path": "494f4bc560feb0a0c69c549d9e94d552cb214aa34f2e532c73c99c615ff3a691.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + 
} + ], + "index": 6 + }, + { + "bbox": [ + 95, + 237, + 488, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 237, + 488, + 274 + ], + "spans": [ + { + "bbox": [ + 95, + 237, + 488, + 274 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. The prompt is : A peaceful indoor church scene with a plain wall, stained glass windows, a wooden podium, and a stone altar under soft sunlight.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 92, + 277, + 194, + 378 + ], + "blocks": [ + { + "bbox": [ + 92, + 277, + 194, + 378 + ], + "lines": [ + { + "bbox": [ + 92, + 277, + 194, + 378 + ], + "spans": [ + { + "bbox": [ + 92, + 277, + 194, + 378 + ], + "type": "image", + "image_path": "8d843e8b140fd3c6f219b02b54f981e68e8501565fcef5774b45f8e54c5de112.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 196, + 276, + 299, + 377 + ], + "blocks": [ + { + "bbox": [ + 196, + 276, + 299, + 377 + ], + "lines": [ + { + "bbox": [ + 196, + 276, + 299, + 377 + ], + "spans": [ + { + "bbox": [ + 196, + 276, + 299, + 377 + ], + "type": "image", + "image_path": "e9707ee40f1e21949b575b798c93018191eaeb0953a7978ef48d77face6376a1.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 304, + 276, + 406, + 377 + ], + "blocks": [ + { + "bbox": [ + 304, + 276, + 406, + 377 + ], + "lines": [ + { + "bbox": [ + 304, + 276, + 406, + 377 + ], + "spans": [ + { + "bbox": [ + 304, + 276, + 406, + 377 + ], + "type": "image", + "image_path": "41fa18b12458faa6a8dc00ecb9ddebef31263254df2985d4bd17d39f0aee636c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 410, + 276, + 512, + 377 + ], + "blocks": [ + { + "bbox": [ + 410, + 276, + 512, + 377 + ], + "lines": [ + { + "bbox": [ + 410, + 276, + 512, + 377 + ], + "spans": [ + { + "bbox": [ + 410, + 276, + 512, + 377 + ], + "type": "image", + "image_path": "fd41fad2fe31ea862cb86cef72fd1fa75256598dab78c33edf42129c6eb8f4a1.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 95, + 382, + 508, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 382, + 508, + 430 + ], + "spans": [ + { + "bbox": [ + 95, + 382, + 508, + 430 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and pose condition below to generate a controllable image. The prompt is : An indoor aquarium scene with a large fish tank full of colorful tropical fish swimming. The fish tank is surrounded by walls and has a visible floor at the bottom. 
The environment is bright and underwater-themed.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 91, + 432, + 194, + 534 + ], + "blocks": [ + { + "bbox": [ + 91, + 432, + 194, + 534 + ], + "lines": [ + { + "bbox": [ + 91, + 432, + 194, + 534 + ], + "spans": [ + { + "bbox": [ + 91, + 432, + 194, + 534 + ], + "type": "image", + "image_path": "32972f2863a4ed568d406e940ec73fb2acb89d05a9ce88264831494aca2909d5.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 607, + 533, + 718 + ], + "lines": [ + { + "bbox": [ + 77, + 607, + 533, + 718 + ], + "spans": [ + { + "bbox": [ + 77, + 607, + 533, + 718 + ], + "type": "text", + "content": "Figure 40: Task: Mask-to-image generation, which requires translating semantic segmentation maps and textual prompts into coherent and realistic images. Setup: We compare GPT-4o with Gemini 2.0 Flash [99] and SD1.5 w. ControlNet [90], focusing on their ability to combine spatial layout from the mask with deeper scene understanding from the prompt. Observations: Compared to previous control tasks, this setting demands more from the model in terms of semantic reasoning and compositional understanding. GPT-4o excels in this regard, producing visually consistent scenes that align with the prompt's intent—such as the serene church interior and the immersive aquarium setting with visitors. However, in fine-grained spatial control, especially with small or tightly shaped objects like tropical fish, SD1.5 w. ControlNet [90] performs better in preserving shape and positioning. Gemini 2.0 Flash continues to struggle in both fidelity and adherence to masks, often missing key scene elements or producing oversimplified outputs." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 196, + 433, + 299, + 533 + ], + "blocks": [ + { + "bbox": [ + 196, + 433, + 299, + 533 + ], + "lines": [ + { + "bbox": [ + 196, + 433, + 299, + 533 + ], + "spans": [ + { + "bbox": [ + 196, + 433, + 299, + 533 + ], + "type": "image", + "image_path": "97eac775ab7fe9404225b9ec41d4b00866061d69df88ce9adc4b8d365ba02992.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 304, + 433, + 406, + 533 + ], + "blocks": [ + { + "bbox": [ + 304, + 433, + 406, + 533 + ], + "lines": [ + { + "bbox": [ + 304, + 433, + 406, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 433, + 406, + 533 + ], + "type": "image", + "image_path": "de15cb94a339b40e3ca5dcb35e27521f86d056376dba353e39d9dfc849b5350a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 410, + 433, + 512, + 534 + ], + "blocks": [ + { + "bbox": [ + 410, + 433, + 512, + 534 + ], + "lines": [ + { + "bbox": [ + 410, + 433, + 512, + 534 + ], + "spans": [ + { + "bbox": [ + 410, + 433, + 512, + 534 + ], + "type": "image", + "image_path": "c99eb6ee2f33a13babe23a2160bddcbb33b49dbaaa63c5c80ad3c9928ada400c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 95, + 536, + 510, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 536, + 510, + 571 + ], + "spans": [ + { + "bbox": [ + 95, + 536, + 510, + 571 + ], + "type": "text", + "content": "Input Text: \"Follow the prompt and mask condition below to generate a controllable image. 
The prompt is: An indoor aquarium with a large fish tank and colorful tropical fish, with a few visitors in the scene.\"" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 576, + 175, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 576, + 175, + 590 + ], + "spans": [ + { + "bbox": [ + 114, + 576, + 175, + 590 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 231, + 576, + 266, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 576, + 266, + 588 + ], + "spans": [ + { + "bbox": [ + 231, + 576, + 266, + 588 + ], + "type": "text", + "content": "GPT40" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 576, + 393, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 576, + 393, + 588 + ], + "spans": [ + { + "bbox": [ + 316, + 576, + 393, + 588 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 419, + 576, + 501, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 576, + 501, + 588 + ], + "spans": [ + { + "bbox": [ + 419, + 576, + 501, + 588 + ], + "type": "text", + "content": "SD w. ControlNet" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "52" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 51 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 181, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 181, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 181, + 83 + ], + "type": "text", + "content": "2.2.7 Camera Control" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 533, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 533, + 168 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 533, + 168 + ], + "type": "text", + "content": "Although recent visual generative models demonstrate remarkable capabilities in creating high-quality images, generating images with specific camera settings (e.g., bokeh blur parameters, focal length, shutter speed, color temperature) and making further adjustments remains a challenging task. We further explore GPT-4o's performance in camera control, evaluating its ability to generate images with desired photographic parameters in text instructions. This task is particularly significant as it bridges the gap between artistic creativity and technical precision, enabling users to simulate professional photography techniques and achieve greater control over the visual output. Such advancements have broad applications in fields like photography, cinematography, and visual design." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 173, + 531, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 173, + 531, + 251 + ], + "spans": [ + { + "bbox": [ + 77, + 173, + 531, + 251 + ], + "type": "text", + "content": "Specifically, we collect text prompts from [118], and compare GPT-4o and Gemini 2.0 Flash [99] with Generative Photography (GP) [118]. The results are reported in Figures 41, 42. 
We can observe that GPT-4o achieves decent results in controlling bokeh blur parameters and color temperature, demonstrating its strong generalizability to various photographic settings. However, it still falls short in adjusting focal length and shutter speed, occasionally leading to inconsistent visual semantics or incorrect visual effects. By comparison, Gemini 2.0 Flash struggles significantly across all camera control scenarios, failing to produce coherent or accurate outputs that align with the specified photographic parameters, highlighting its limited capability in this domain." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 255, + 531, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 255, + 531, + 289 + ], + "spans": [ + { + "bbox": [ + 77, + 255, + 531, + 289 + ], + "type": "text", + "content": "In this task, GPT-4o shows promising potential in camera control, outperforming Gemini 2.0 Flash and achieving competitive results in certain aspects. Nonetheless, there remains room for improvement in handling more complex adjustments, which could further enhance its applicability in professional photography and creative industries." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "53" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 52 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 118, + 181, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 118, + 181, + 129 + ], + "spans": [ + { + "bbox": [ + 107, + 118, + 181, + 129 + ], + "type": "text", + "content": "Camera Control" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 132, + 140, + 145, + 155 + ], + "blocks": [ + { + "bbox": [ + 132, + 140, + 145, + 155 + ], + "lines": [ + { + "bbox": [ + 132, + 140, + 145, + 155 + ], + "spans": [ + { + "bbox": [ + 132, + 140, + 145, + 155 + ], + "type": "image", + "image_path": "541421ed5de9f82a9202335ca7678323b87627971bf9397ec165d6899894a416.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 147, + 143, + 473, + 157 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 143, + 473, + 157 + ], + "spans": [ + { + "bbox": [ + 147, + 143, + 473, + 157 + ], + "type": "text", + "content": "Evaluation: Camera setting adjustment, semantic consistency." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 184, + 127, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 184, + 127, + 194 + ], + "spans": [ + { + "bbox": [ + 93, + 184, + 127, + 194 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 91, + 225, + 124, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 225, + 124, + 236 + ], + "spans": [ + { + "bbox": [ + 91, + 225, + 124, + 236 + ], + "type": "text", + "content": "Gemini" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 86, + 239, + 129, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 239, + 129, + 250 + ], + "spans": [ + { + "bbox": [ + 86, + 239, + 129, + 250 + ], + "type": "text", + "content": "2.0 Flash" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 100, + 285, + 114, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 285, + 114, + 296 + ], + "spans": [ + { + "bbox": [ + 100, + 285, + 114, + 296 + ], + "type": "text", + "content": "GP" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 131, + 161, + 204, + 211 + ], + "blocks": [ + { + "bbox": [ + 131, + 161, + 204, + 211 + ], + "lines": [ + { + "bbox": [ + 131, + 161, + 204, + 211 + ], + "spans": [ + { + "bbox": [ + 131, + 161, + 204, + 211 + ], + "type": "image", + "image_path": "57830b99a43f06f49ba04d1b2f5ded94ea4df8175531c29f23aca0f89a53b83b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 131, + 212, + 204, + 262 + ], + "blocks": [ + { + "bbox": [ + 131, + 212, + 204, + 262 + ], + "lines": [ + { + "bbox": [ + 131, + 212, + 204, + 262 + ], + "spans": [ + { + "bbox": [ + 131, + 212, + 204, + 262 + ], + "type": "image", + "image_path": "91df94d12a7739436d063cd8168ea4e4d26c88f28f02f9407f230adabe8d29e6.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 131, + 262, + 204, + 313 + ], + "blocks": [ + { + "bbox": [ + 131, + 262, + 204, + 313 + ], + "lines": [ + { + "bbox": [ + 131, + 262, + 204, + 313 + ], + "spans": [ + { + "bbox": [ + 131, + 262, + 204, + 313 + ], + "type": "image", + "image_path": "0ec6090994cbd2295c92ceaabd8108bda9fc1d44cff795793c63c2f03a6f1419.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 316, + 176, + 325 + ], + "lines": [ + { + "bbox": [ + 157, + 316, + 176, + 325 + ], + "spans": [ + { + "bbox": [ + 157, + 316, + 176, + 325 + ], + "type": "text", + "content": "28.0" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 206, + 161, + 279, + 211 + ], + "blocks": [ + { + "bbox": [ + 206, + 161, + 279, + 211 + ], + "lines": [ + { + "bbox": [ + 206, + 161, + 279, + 211 + ], + "spans": [ + { + "bbox": [ + 206, + 161, + 279, + 211 + ], + "type": "image", + "image_path": "f69e3b0379013765f2adbc1aca797b0298039407cd2b3e804055f8aa7b5d7129.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 206, + 212, + 279, + 261 + ], + "blocks": [ + { + "bbox": [ + 206, + 212, + 279, + 261 + ], + "lines": [ + { + "bbox": [ + 206, + 212, + 279, + 261 + ], + "spans": [ + { + "bbox": [ + 206, + 212, + 279, + 261 + ], + "type": "image", + "image_path": 
"e06a1d22dd6093751188bb61a4da58c22606454753b4f5ef255503039c23b4d9.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 206, + 262, + 279, + 313 + ], + "blocks": [ + { + "bbox": [ + 206, + 262, + 279, + 313 + ], + "lines": [ + { + "bbox": [ + 206, + 262, + 279, + 313 + ], + "spans": [ + { + "bbox": [ + 206, + 262, + 279, + 313 + ], + "type": "image", + "image_path": "c037197eb3df793fe54ce69be58db82135ee54f307b277b1651e4aea252edd89.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 233, + 316, + 252, + 325 + ], + "lines": [ + { + "bbox": [ + 233, + 316, + 252, + 325 + ], + "spans": [ + { + "bbox": [ + 233, + 316, + 252, + 325 + ], + "type": "text", + "content": "14.0" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 282, + 161, + 354, + 211 + ], + "blocks": [ + { + "bbox": [ + 282, + 161, + 354, + 211 + ], + "lines": [ + { + "bbox": [ + 282, + 161, + 354, + 211 + ], + "spans": [ + { + "bbox": [ + 282, + 161, + 354, + 211 + ], + "type": "image", + "image_path": "7617257b54d51121e2b378531c54403d5f093b828cb72764f28cbaf8e2d75466.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 282, + 212, + 354, + 261 + ], + "blocks": [ + { + "bbox": [ + 282, + 212, + 354, + 261 + ], + "lines": [ + { + "bbox": [ + 282, + 212, + 354, + 261 + ], + "spans": [ + { + "bbox": [ + 282, + 212, + 354, + 261 + ], + "type": "image", + "image_path": "cbefa33dacb0fcc87c8295a83dda71231a6f564b3f6a39b28e9b44a76d014c28.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 282, + 262, + 354, + 313 + ], + "blocks": [ + { + "bbox": [ + 282, + 262, + 354, + 313 + ], + "lines": [ + { + "bbox": [ + 282, + 262, + 354, + 313 + ], + "spans": [ + { + "bbox": [ + 282, + 262, + 354, + 313 + ], + "type": "image", + "image_path": "0abff9671077ea8920b3cfaa655376afa170146e8731c668c911bd36367fe676.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 316, + 327, + 325 + ], + "lines": [ + { + "bbox": [ + 308, + 316, + 327, + 325 + ], + "spans": [ + { + "bbox": [ + 308, + 316, + 327, + 325 + ], + "type": "text", + "content": "10.0" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 357, + 161, + 430, + 211 + ], + "blocks": [ + { + "bbox": [ + 357, + 161, + 430, + 211 + ], + "lines": [ + { + "bbox": [ + 357, + 161, + 430, + 211 + ], + "spans": [ + { + "bbox": [ + 357, + 161, + 430, + 211 + ], + "type": "image", + "image_path": "3f74eb072bab2b800151c5b569b4d234ff14848a96ea3fa56d91a676cf3c70bf.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 357, + 212, + 429, + 261 + ], + "blocks": [ + { + "bbox": [ + 357, + 212, + 429, + 261 + ], + "lines": [ + { + "bbox": [ + 357, + 212, + 429, + 261 + ], + "spans": [ + { + "bbox": [ + 357, + 212, + 429, + 261 + ], + "type": "image", + "image_path": "3fe16c86a896d908033e9f78ddd28e8ff19dae54bfcbf58eef865c9f9147a115.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 357, + 262, + 430, + 313 + ], + "blocks": [ + { + 
"bbox": [ + 357, + 262, + 430, + 313 + ], + "lines": [ + { + "bbox": [ + 357, + 262, + 430, + 313 + ], + "spans": [ + { + "bbox": [ + 357, + 262, + 430, + 313 + ], + "type": "image", + "image_path": "e138171044c5ed197f98e471ee68b0a7988c369038ddde3296f693594e3ed2c6.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 386, + 316, + 400, + 325 + ], + "lines": [ + { + "bbox": [ + 386, + 316, + 400, + 325 + ], + "spans": [ + { + "bbox": [ + 386, + 316, + 400, + 325 + ], + "type": "text", + "content": "6.0" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 432, + 161, + 506, + 211 + ], + "blocks": [ + { + "bbox": [ + 432, + 161, + 506, + 211 + ], + "lines": [ + { + "bbox": [ + 432, + 161, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 432, + 161, + 506, + 211 + ], + "type": "image", + "image_path": "4ae472a9c88dd869cf1cee61a0638cf7a434e0355bfd8577ac37c979ef8d008a.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 432, + 212, + 505, + 261 + ], + "blocks": [ + { + "bbox": [ + 432, + 212, + 505, + 261 + ], + "lines": [ + { + "bbox": [ + 432, + 212, + 505, + 261 + ], + "spans": [ + { + "bbox": [ + 432, + 212, + 505, + 261 + ], + "type": "image", + "image_path": "02202d497506f20a18941658d169deee92ee7a8b17e62ab0a67e4f38d7fc3f81.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 432, + 262, + 506, + 313 + ], + "blocks": [ + { + "bbox": [ + 432, + 262, + 506, + 313 + ], + "lines": [ + { + "bbox": [ + 432, + 262, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 432, + 262, + 506, + 313 + ], + "type": "image", + "image_path": "359582282a17e20740a8e878147c496925396e8e780d4f8429b70706e235f66f.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 462, + 316, + 475, + 325 + ], + "lines": [ + { + "bbox": [ + 462, + 316, + 475, + 325 + ], + "spans": [ + { + "bbox": [ + 462, + 316, + 475, + 325 + ], + "type": "text", + "content": "2.0" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "bbox": [ + 94, + 394, + 128, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 394, + 128, + 405 + ], + "spans": [ + { + "bbox": [ + 94, + 394, + 128, + 405 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 93, + 438, + 124, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 438, + 124, + 449 + ], + "spans": [ + { + "bbox": [ + 93, + 438, + 124, + 449 + ], + "type": "text", + "content": "Gemini" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 88, + 453, + 129, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 453, + 129, + 463 + ], + "spans": [ + { + "bbox": [ + 88, + 453, + 129, + 463 + ], + "type": "text", + "content": "2.0 Flash" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 103, + 495, + 116, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 495, + 116, + 505 + ], + "spans": [ + { + "bbox": [ + 103, + 495, + 116, + 505 + ], + "type": "text", + "content": "GP" + } + ] + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 133, + 372, + 205, + 422 + ], + "blocks": [ + { + "bbox": [ + 93, + 329, + 494, + 366 + ], + "lines": [ + { + "bbox": 
[ + 93, + 329, + 494, + 366 + ], + "spans": [ + { + "bbox": [ + 93, + 329, + 494, + 366 + ], + "type": "text", + "content": "Input Text: \"A horse with a white face stands in a grassy field, looking at the camera; with bokeh blur parameter *\" & \"Adjust the bokeh blur parameter to *\" (* indicates a specific value)." + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 133, + 372, + 205, + 422 + ], + "lines": [ + { + "bbox": [ + 133, + 372, + 205, + 422 + ], + "spans": [ + { + "bbox": [ + 133, + 372, + 205, + 422 + ], + "type": "image", + "image_path": "6ac4b0f6100f66c6e3a3675e85edf101192eaa5e7afac67acbc73dad433f98df.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 133, + 423, + 205, + 472 + ], + "blocks": [ + { + "bbox": [ + 133, + 423, + 205, + 472 + ], + "lines": [ + { + "bbox": [ + 133, + 423, + 205, + 472 + ], + "spans": [ + { + "bbox": [ + 133, + 423, + 205, + 472 + ], + "type": "image", + "image_path": "f13c64cd986d4479510b7a4819c3393ff74c88928cb3d8ab9642dde087405fb8.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 133, + 473, + 205, + 522 + ], + "blocks": [ + { + "bbox": [ + 133, + 473, + 205, + 522 + ], + "lines": [ + { + "bbox": [ + 133, + 473, + 205, + 522 + ], + "spans": [ + { + "bbox": [ + 133, + 473, + 205, + 522 + ], + "type": "image", + "image_path": "8ba638159d6af8b00306bfa770db1930683a70fb3c84b43764507be8b7b6f124.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 526, + 178, + 535 + ], + "lines": [ + { + "bbox": [ + 159, + 526, + 178, + 535 + ], + "spans": [ + { + "bbox": [ + 159, + 526, + 178, + 535 + ], + "type": "text", + "content": "24.9" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 208, + 373, + 279, + 422 + ], + "blocks": [ + { + "bbox": [ + 208, + 373, + 279, + 422 + ], + "lines": [ + { + "bbox": [ + 208, + 373, + 279, + 422 + ], + "spans": [ + { + "bbox": [ + 208, + 373, + 279, + 422 + ], + "type": "image", + "image_path": "032dd6d95007fdd7dba51b274352d33e5c899e1f0a93a08947da88f005ed8904.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 208, + 423, + 279, + 472 + ], + "blocks": [ + { + "bbox": [ + 208, + 423, + 279, + 472 + ], + "lines": [ + { + "bbox": [ + 208, + 423, + 279, + 472 + ], + "spans": [ + { + "bbox": [ + 208, + 423, + 279, + 472 + ], + "type": "image", + "image_path": "32ffc1eb96999685c0b89d169c9aa5316abd2e06ff28b61ba0beef011cd7d160.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 208, + 473, + 280, + 522 + ], + "blocks": [ + { + "bbox": [ + 208, + 473, + 280, + 522 + ], + "lines": [ + { + "bbox": [ + 208, + 473, + 280, + 522 + ], + "spans": [ + { + "bbox": [ + 208, + 473, + 280, + 522 + ], + "type": "image", + "image_path": "6f7393447d4c0dd9cc43b403fb767ccae61d13073cbf4e7b6de1e3bbffbc5cfd.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 235, + 526, + 253, + 535 + ], + "lines": [ + { + "bbox": [ + 235, + 526, + 253, + 535 + ], + "spans": [ + { + "bbox": [ + 235, + 526, + 253, + 535 + ], + "type": "text", + "content": "36.9" + } + ] + } + ], + "index": 39, + "angle": 
0, + "type": "image_caption" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 282, + 373, + 354, + 422 + ], + "blocks": [ + { + "bbox": [ + 282, + 373, + 354, + 422 + ], + "lines": [ + { + "bbox": [ + 282, + 373, + 354, + 422 + ], + "spans": [ + { + "bbox": [ + 282, + 373, + 354, + 422 + ], + "type": "image", + "image_path": "e9ec594631498e2a1f8e661bedae31c443f9b190e1be8f521a61188f4d8f033b.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 282, + 423, + 354, + 472 + ], + "blocks": [ + { + "bbox": [ + 282, + 423, + 354, + 472 + ], + "lines": [ + { + "bbox": [ + 282, + 423, + 354, + 472 + ], + "spans": [ + { + "bbox": [ + 282, + 423, + 354, + 472 + ], + "type": "image", + "image_path": "d489e64ba5236d20a8ccd8750097eec10c9d7fb5eb57f9ae796871bfa2203c22.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 282, + 473, + 355, + 522 + ], + "blocks": [ + { + "bbox": [ + 282, + 473, + 355, + 522 + ], + "lines": [ + { + "bbox": [ + 282, + 473, + 355, + 522 + ], + "spans": [ + { + "bbox": [ + 282, + 473, + 355, + 522 + ], + "type": "image", + "image_path": "7c825dcf01a94678b59a1ac7f191122428497b7adf966110443aeb1232b68996.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 526, + 328, + 535 + ], + "lines": [ + { + "bbox": [ + 309, + 526, + 328, + 535 + ], + "spans": [ + { + "bbox": [ + 309, + 526, + 328, + 535 + ], + "type": "text", + "content": "48.9" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 97, + 539, + 511, + 564 + ], + "lines": [ + { + "bbox": [ + 97, + 539, + 511, + 564 + ], + "spans": [ + { + "bbox": [ + 97, + 539, + 511, + 564 + ], + "type": "text", + "content": "Input Text: \"A beautiful garden filled with red roses and green leaves; with * mm lens\" & \"Adjust the lens to * mm\"." 
+ } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_caption" + } + ], + "index": 42 + }, + { + "type": "image", + "bbox": [ + 358, + 373, + 430, + 422 + ], + "blocks": [ + { + "bbox": [ + 358, + 373, + 430, + 422 + ], + "lines": [ + { + "bbox": [ + 358, + 373, + 430, + 422 + ], + "spans": [ + { + "bbox": [ + 358, + 373, + 430, + 422 + ], + "type": "image", + "image_path": "7b779ff8fb58b3aae04af13166a3b1967efcf87892bd378886be402ea8763601.jpg" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_body" + } + ], + "index": 44 + }, + { + "type": "image", + "bbox": [ + 358, + 423, + 430, + 472 + ], + "blocks": [ + { + "bbox": [ + 358, + 423, + 430, + 472 + ], + "lines": [ + { + "bbox": [ + 358, + 423, + 430, + 472 + ], + "spans": [ + { + "bbox": [ + 358, + 423, + 430, + 472 + ], + "type": "image", + "image_path": "889fe3842b7457a957d4f92c06ccec58df898a9c56de1a94074c190c063efac7.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 358, + 473, + 430, + 522 + ], + "blocks": [ + { + "bbox": [ + 358, + 473, + 430, + 522 + ], + "lines": [ + { + "bbox": [ + 358, + 473, + 430, + 522 + ], + "spans": [ + { + "bbox": [ + 358, + 473, + 430, + 522 + ], + "type": "image", + "image_path": "b5bfd8b85c6c3bc556cebefab62a44ff37c4d3e8e69240fa38a6ac13c7cc168b.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 384, + 526, + 403, + 535 + ], + "lines": [ + { + "bbox": [ + 384, + 526, + 403, + 535 + ], + "spans": [ + { + "bbox": [ + 384, + 526, + 403, + 535 + ], + "type": "text", + "content": "60.9" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 581, + 533, + 681 + ], + "lines": [ + { + "bbox": [ + 77, + 581, + 533, + 681 + ], + "spans": [ + { + "bbox": [ + 77, + 581, + 533, + 681 + ], + "type": "text", + "content": "Figure 41: Task: Camera control. The goal is to generate images aligned with specific photographic parameters, such as bokeh blur, focal length, shutter speed, and color temperature. Setup: Results are based on text prompts collected from [118], comparing outputs from GPT-4o, Gemini 2.0 Flash [99], and Generative Photography (GP) [118]. Each row includes the input text instructions and corresponding outputs. Observations: GPT-4o demonstrates strong performance in controlling bokeh blur, producing visually appealing and parameter-aligned results. However, it shows limitations in handling focal length, occasionally generating inconsistent or less accurate outputs. By contrast, Gemini 2.0 Flash struggles significantly in both aspects, often failing to produce coherent results. Overall, GPT-4o achieves better performance in this task but still requires further refinement to enhance focal length control." 
+ } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_caption" + } + ], + "index": 46 + }, + { + "type": "image", + "bbox": [ + 432, + 373, + 506, + 422 + ], + "blocks": [ + { + "bbox": [ + 432, + 373, + 506, + 422 + ], + "lines": [ + { + "bbox": [ + 432, + 373, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 432, + 373, + 506, + 422 + ], + "type": "image", + "image_path": "099e4ec78e062250f7bdab074cc4518ca8ed428e187a0bb348ddc4b8e86c87a1.jpg" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_body" + } + ], + "index": 48 + }, + { + "type": "image", + "bbox": [ + 432, + 423, + 506, + 472 + ], + "blocks": [ + { + "bbox": [ + 432, + 423, + 506, + 472 + ], + "lines": [ + { + "bbox": [ + 432, + 423, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 432, + 423, + 506, + 472 + ], + "type": "image", + "image_path": "74da7612dd6d92ecd657e1f44bdbfc6138a44b8f5bad9b26166f4cd12e4bcb65.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 432, + 473, + 506, + 522 + ], + "blocks": [ + { + "bbox": [ + 432, + 473, + 506, + 522 + ], + "lines": [ + { + "bbox": [ + 432, + 473, + 506, + 522 + ], + "spans": [ + { + "bbox": [ + 432, + 473, + 506, + 522 + ], + "type": "image", + "image_path": "e3c6465c6ac99b5cac8b7b87979c048e1ec6ea06f3001201cdafef489c4470e1.jpg" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 459, + 526, + 477, + 535 + ], + "lines": [ + { + "bbox": [ + 459, + 526, + 477, + 535 + ], + "spans": [ + { + "bbox": [ + 459, + 526, + 477, + 535 + ], + "type": "text", + "content": "69.9" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_caption" + } + ], + "index": 50 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "54" + } + ] + } + ], + "index": 54 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 53 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 121, + 181, + 132 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 121, + 181, + 132 + ], + "spans": [ + { + "bbox": [ + 107, + 121, + 181, + 132 + ], + "type": "text", + "content": "Camera Control" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 148, + 144, + 473, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 144, + 473, + 159 + ], + "spans": [ + { + "bbox": [ + 148, + 144, + 473, + 159 + ], + "type": "text", + "content": "Evaluation: Camera setting adjustment, semantic consistency." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 94, + 183, + 128, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 183, + 128, + 193 + ], + "spans": [ + { + "bbox": [ + 94, + 183, + 128, + 193 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 133, + 164, + 205, + 213 + ], + "blocks": [ + { + "bbox": [ + 133, + 164, + 205, + 213 + ], + "lines": [ + { + "bbox": [ + 133, + 164, + 205, + 213 + ], + "spans": [ + { + "bbox": [ + 133, + 164, + 205, + 213 + ], + "type": "image", + "image_path": "b0d136fb714a9be563b2f3204d0e0bad1bfffa49452c0d77a5bdfb3a14e364d0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 208, + 164, + 280, + 213 + ], + "blocks": [ + { + "bbox": [ + 208, + 164, + 280, + 213 + ], + "lines": [ + { + "bbox": [ + 208, + 164, + 280, + 213 + ], + "spans": [ + { + "bbox": [ + 208, + 164, + 280, + 213 + ], + "type": "image", + "image_path": "6353301ea1a4bd02c64e31effbe95a706a80640c23bedd6dff2583457c4933e0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 283, + 164, + 354, + 213 + ], + "blocks": [ + { + "bbox": [ + 283, + 164, + 354, + 213 + ], + "lines": [ + { + "bbox": [ + 283, + 164, + 354, + 213 + ], + "spans": [ + { + "bbox": [ + 283, + 164, + 354, + 213 + ], + "type": "image", + "image_path": "f748ae519d07deeba518763dcddb8fac9d6dd17225e417618ff62357c72bb0c3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 357, + 164, + 430, + 213 + ], + "blocks": [ + { + "bbox": [ + 357, + 164, + 430, + 213 + ], + "lines": [ + { + "bbox": [ + 357, + 164, + 430, + 213 + ], + "spans": [ + { + "bbox": [ + 357, + 164, + 430, + 213 + ], + "type": "image", + "image_path": "a145180a5acb6bef4bb07f8a9b725b80a966f60fdc5991366b23f4cc7d100d3d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 432, + 164, + 505, + 213 + ], + "blocks": [ + { + "bbox": [ + 432, + 164, + 505, + 213 + ], + "lines": [ + { + "bbox": [ + 432, + 164, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 432, + 164, + 505, + 213 + ], + "type": "image", + "image_path": "d215069ec65dc0f111efbae6b16c00981d2ebba9711197049d3d10d379a5fe6f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 90, + 228, + 129, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 228, + 129, + 254 + ], + "spans": [ + { + "bbox": [ + 90, + 228, + 129, + 254 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 133, + 213, + 205, + 263 + ], + "blocks": [ + { + "bbox": [ + 133, + 213, + 205, + 263 + ], + "lines": [ + { + "bbox": [ + 133, + 213, + 205, + 263 + ], + "spans": [ + { + "bbox": [ + 133, + 213, + 205, + 263 + ], + "type": "image", + "image_path": "34364cd752d523f7253e2bed26e26e7bb958064d8b44d8df3b0c491d05214cb2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 209, + 213, + 280, + 263 + ], + "blocks": [ + { + "bbox": [ + 209, + 213, + 280, + 263 + ], + "lines": [ + { + "bbox": [ + 209, + 213, + 280, + 263 + ], + "spans": [ + { + "bbox": [ + 209, + 213, + 280, + 263 + 
], + "type": "image", + "image_path": "8107bc9ed92dde4a8075837a95dd9e6711b30dd5e1a18eb7914511ac730f010f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 283, + 213, + 355, + 263 + ], + "blocks": [ + { + "bbox": [ + 283, + 213, + 355, + 263 + ], + "lines": [ + { + "bbox": [ + 283, + 213, + 355, + 263 + ], + "spans": [ + { + "bbox": [ + 283, + 213, + 355, + 263 + ], + "type": "image", + "image_path": "e8ab46f36bede74d3eb3841233ac4c2a6eb5643b1d97d4921f99dd2c221aef40.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 358, + 213, + 430, + 263 + ], + "blocks": [ + { + "bbox": [ + 358, + 213, + 430, + 263 + ], + "lines": [ + { + "bbox": [ + 358, + 213, + 430, + 263 + ], + "spans": [ + { + "bbox": [ + 358, + 213, + 430, + 263 + ], + "type": "image", + "image_path": "a26d98ea46dba3acd2775aa3134da02bf0e703e9b8a805247053fdfe1b12d8bc.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 433, + 213, + 505, + 263 + ], + "blocks": [ + { + "bbox": [ + 433, + 213, + 505, + 263 + ], + "lines": [ + { + "bbox": [ + 433, + 213, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 433, + 213, + 505, + 263 + ], + "type": "image", + "image_path": "caed113ea41a0bbb54c520a911d0e22a31ace6434333320e221dd66e5f86ef33.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 285, + 118, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 285, + 118, + 296 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 118, + 296 + ], + "type": "text", + "content": "GP" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 133, + 264, + 205, + 314 + ], + "blocks": [ + { + "bbox": [ + 133, + 264, + 205, + 314 + ], + "lines": [ + { + "bbox": [ + 133, + 264, + 205, + 314 + ], + "spans": [ + { + "bbox": [ + 133, + 264, + 205, + 314 + ], + "type": "image", + "image_path": "9d2ccc2dc8ada5802d342f8533d6e9a320919119569352e82afb4bf3500698e0.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 318, + 178, + 327 + ], + "lines": [ + { + "bbox": [ + 159, + 318, + 178, + 327 + ], + "spans": [ + { + "bbox": [ + 159, + 318, + 178, + 327 + ], + "type": "text", + "content": "0.88" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 209, + 264, + 280, + 314 + ], + "blocks": [ + { + "bbox": [ + 209, + 264, + 280, + 314 + ], + "lines": [ + { + "bbox": [ + 209, + 264, + 280, + 314 + ], + "spans": [ + { + "bbox": [ + 209, + 264, + 280, + 314 + ], + "type": "image", + "image_path": "dd17353d44f66bbb4b35301466dd1088e56a6dbb759f8d6e1a798e291070cdb4.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 234, + 318, + 253, + 327 + ], + "lines": [ + { + "bbox": [ + 234, + 318, + 253, + 327 + ], + "spans": [ + { + "bbox": [ + 234, + 318, + 253, + 327 + ], + "type": "text", + "content": "0.68" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 283, + 264, + 355, + 314 + ], + "blocks": [ + { + "bbox": [ + 283, + 264, + 355, + 314 + ], + "lines": [ + { + "bbox": [ + 283, + 264, + 355, + 314 + ], + "spans": [ + { + "bbox": [ + 283, + 264, + 
355, + 314 + ], + "type": "image", + "image_path": "c65293d5487451040c046c09049d817ac48c8d7e2a82e633e0d317f853e37512.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 318, + 328, + 327 + ], + "lines": [ + { + "bbox": [ + 309, + 318, + 328, + 327 + ], + "spans": [ + { + "bbox": [ + 309, + 318, + 328, + 327 + ], + "type": "text", + "content": "0.48" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 358, + 264, + 430, + 314 + ], + "blocks": [ + { + "bbox": [ + 358, + 264, + 430, + 314 + ], + "lines": [ + { + "bbox": [ + 358, + 264, + 430, + 314 + ], + "spans": [ + { + "bbox": [ + 358, + 264, + 430, + 314 + ], + "type": "image", + "image_path": "1a0a8291fc0eb54d480890af32530069802f1d52bc82f8da414ec15032e47f7b.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 384, + 318, + 403, + 327 + ], + "lines": [ + { + "bbox": [ + 384, + 318, + 403, + 327 + ], + "spans": [ + { + "bbox": [ + 384, + 318, + 403, + 327 + ], + "type": "text", + "content": "0.38" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 432, + 264, + 505, + 314 + ], + "blocks": [ + { + "bbox": [ + 432, + 264, + 505, + 314 + ], + "lines": [ + { + "bbox": [ + 432, + 264, + 505, + 314 + ], + "spans": [ + { + "bbox": [ + 432, + 264, + 505, + 314 + ], + "type": "image", + "image_path": "0b0cfcb2c756d34ecf6d719996e6b93b05c237d3b9298e8a45b297f87db15d04.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 459, + 318, + 477, + 327 + ], + "lines": [ + { + "bbox": [ + 459, + 318, + 477, + 327 + ], + "spans": [ + { + "bbox": [ + 459, + 318, + 477, + 327 + ], + "type": "text", + "content": "0.28" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "bbox": [ + 94, + 383, + 128, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 383, + 128, + 393 + ], + "spans": [ + { + "bbox": [ + 94, + 383, + 128, + 393 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 133, + 363, + 205, + 412 + ], + "blocks": [ + { + "bbox": [ + 93, + 329, + 500, + 354 + ], + "lines": [ + { + "bbox": [ + 93, + 329, + 500, + 354 + ], + "spans": [ + { + "bbox": [ + 93, + 329, + 500, + 354 + ], + "type": "text", + "content": "Input Text: \"A blue pot with a plant in it is placed on a window sill, surrounded by other potted plants; with shutter speed * second\" & \"Adjust the shutter speed to * second\"." 
+ } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 133, + 363, + 205, + 412 + ], + "lines": [ + { + "bbox": [ + 133, + 363, + 205, + 412 + ], + "spans": [ + { + "bbox": [ + 133, + 363, + 205, + 412 + ], + "type": "image", + "image_path": "2f8ba2525f180355e9429c8aa78eab3fdc5e1c1c77db35dd88729c6608f735e8.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 208, + 363, + 280, + 412 + ], + "blocks": [ + { + "bbox": [ + 208, + 363, + 280, + 412 + ], + "lines": [ + { + "bbox": [ + 208, + 363, + 280, + 412 + ], + "spans": [ + { + "bbox": [ + 208, + 363, + 280, + 412 + ], + "type": "image", + "image_path": "e2bafc2e24743ad788bd64f544fbd0056daacff8e5ccf84bf8a7e7d5bd57b702.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 283, + 363, + 355, + 412 + ], + "blocks": [ + { + "bbox": [ + 283, + 363, + 355, + 412 + ], + "lines": [ + { + "bbox": [ + 283, + 363, + 355, + 412 + ], + "spans": [ + { + "bbox": [ + 283, + 363, + 355, + 412 + ], + "type": "image", + "image_path": "65a3cacd9307277d2c15e32c391395ee4d10278b0cde3e7007004223bb26c6c9.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 358, + 363, + 430, + 412 + ], + "blocks": [ + { + "bbox": [ + 358, + 363, + 430, + 412 + ], + "lines": [ + { + "bbox": [ + 358, + 363, + 430, + 412 + ], + "spans": [ + { + "bbox": [ + 358, + 363, + 430, + 412 + ], + "type": "image", + "image_path": "4851ec2102693629a48d4c59a074117a859cc53c4e7a1a739b2b4623d6768712.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 433, + 363, + 506, + 412 + ], + "blocks": [ + { + "bbox": [ + 433, + 363, + 506, + 412 + ], + "lines": [ + { + "bbox": [ + 433, + 363, + 506, + 412 + ], + "spans": [ + { + "bbox": [ + 433, + 363, + 506, + 412 + ], + "type": "image", + "image_path": "c55187e3cf1b46125c32de8fa1ced6d268639035921d9b205dcf8ee53eaef80f.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "bbox": [ + 90, + 427, + 131, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 427, + 131, + 453 + ], + "spans": [ + { + "bbox": [ + 90, + 427, + 131, + 453 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 133, + 414, + 205, + 461 + ], + "blocks": [ + { + "bbox": [ + 133, + 414, + 205, + 461 + ], + "lines": [ + { + "bbox": [ + 133, + 414, + 205, + 461 + ], + "spans": [ + { + "bbox": [ + 133, + 414, + 205, + 461 + ], + "type": "image", + "image_path": "fe6974e0162720276dc4d0b553ff8e8fdee18edf7ebaa81e761e6d01fa924c91.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 208, + 415, + 280, + 461 + ], + "blocks": [ + { + "bbox": [ + 208, + 415, + 280, + 461 + ], + "lines": [ + { + "bbox": [ + 208, + 415, + 280, + 461 + ], + "spans": [ + { + "bbox": [ + 208, + 415, + 280, + 461 + ], + "type": "image", + "image_path": "c30bd99d0cf615babc90bf1be839dd3875329913446e1f8f05070addcf8105c3.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 283, + 414, + 355, + 461 + ], + "blocks": [ + { + "bbox": 
[ + 283, + 414, + 355, + 461 + ], + "lines": [ + { + "bbox": [ + 283, + 414, + 355, + 461 + ], + "spans": [ + { + "bbox": [ + 283, + 414, + 355, + 461 + ], + "type": "image", + "image_path": "de07423ffdd0dc744cf6f4c7b38144d08ecc3112098ef6d6efbfb06253fb9def.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 358, + 415, + 430, + 461 + ], + "blocks": [ + { + "bbox": [ + 358, + 415, + 430, + 461 + ], + "lines": [ + { + "bbox": [ + 358, + 415, + 430, + 461 + ], + "spans": [ + { + "bbox": [ + 358, + 415, + 430, + 461 + ], + "type": "image", + "image_path": "9df5012deab1c75c8cc3c9b199f8d51adaf06952378b5293634b0a061a791b0f.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 433, + 415, + 506, + 461 + ], + "blocks": [ + { + "bbox": [ + 433, + 415, + 506, + 461 + ], + "lines": [ + { + "bbox": [ + 433, + 415, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 433, + 415, + 506, + 461 + ], + "type": "image", + "image_path": "76238724fe5435d83934f4d9ceaa07fd016a8c9970f6377cd6eb7afa1409a5d4.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 484, + 118, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 484, + 118, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 118, + 494 + ], + "type": "text", + "content": "GP" + } + ] + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 133, + 462, + 205, + 511 + ], + "blocks": [ + { + "bbox": [ + 133, + 462, + 205, + 511 + ], + "lines": [ + { + "bbox": [ + 133, + 462, + 205, + 511 + ], + "spans": [ + { + "bbox": [ + 133, + 462, + 205, + 511 + ], + "type": "image", + "image_path": "077315bfabdff852ee00ffcd33f6dc6141773a534acf687e5c1e7ad5284a7a19.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 154, + 515, + 183, + 525 + ], + "lines": [ + { + "bbox": [ + 154, + 515, + 183, + 525 + ], + "spans": [ + { + "bbox": [ + 154, + 515, + 183, + 525 + ], + "type": "text", + "content": "3100.0" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 208, + 462, + 280, + 511 + ], + "blocks": [ + { + "bbox": [ + 208, + 462, + 280, + 511 + ], + "lines": [ + { + "bbox": [ + 208, + 462, + 280, + 511 + ], + "spans": [ + { + "bbox": [ + 208, + 462, + 280, + 511 + ], + "type": "image", + "image_path": "334e57c72d615f0c16cf7f289ed3e1236adf82ed7e69d06552fc24a1910711db.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 229, + 515, + 258, + 525 + ], + "lines": [ + { + "bbox": [ + 229, + 515, + 258, + 525 + ], + "spans": [ + { + "bbox": [ + 229, + 515, + 258, + 525 + ], + "type": "text", + "content": "4000.0" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 283, + 462, + 355, + 511 + ], + "blocks": [ + { + "bbox": [ + 283, + 462, + 355, + 511 + ], + "lines": [ + { + "bbox": [ + 283, + 462, + 355, + 511 + ], + "spans": [ + { + "bbox": [ + 283, + 462, + 355, + 511 + ], + "type": "image", + "image_path": "838fcf545b25f35bfeec3bef84c6f0c0db6ecfcb9f88eaec03caefa49c2b0b1c.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 515, + 333, + 525 + ], + "lines": [ + { + "bbox": [ + 304, + 515, + 333, + 
525 + ], + "spans": [ + { + "bbox": [ + 304, + 515, + 333, + 525 + ], + "type": "text", + "content": "8000.0" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 97, + 529, + 507, + 565 + ], + "lines": [ + { + "bbox": [ + 97, + 529, + 507, + 565 + ], + "spans": [ + { + "bbox": [ + 97, + 529, + 507, + 565 + ], + "type": "text", + "content": "Input Text: \"A collection of trash cans and a potted plant are seen in the image. The trash cans are individually in blue, black and yellow; with temperature * kelvin\" & \"Adjust the temperature to * kelvin\"." + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_caption" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 358, + 462, + 430, + 511 + ], + "blocks": [ + { + "bbox": [ + 358, + 462, + 430, + 511 + ], + "lines": [ + { + "bbox": [ + 358, + 462, + 430, + 511 + ], + "spans": [ + { + "bbox": [ + 358, + 462, + 430, + 511 + ], + "type": "image", + "image_path": "3569ee4edc461e7da1c8372a4a522d92cca61c99b430368708a6b7dc82703262.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 380, + 515, + 408, + 525 + ], + "lines": [ + { + "bbox": [ + 380, + 515, + 408, + 525 + ], + "spans": [ + { + "bbox": [ + 380, + 515, + 408, + 525 + ], + "type": "text", + "content": "7000.0" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 77, + 578, + 533, + 678 + ], + "lines": [ + { + "bbox": [ + 77, + 578, + 533, + 678 + ], + "spans": [ + { + "bbox": [ + 77, + 578, + 533, + 678 + ], + "type": "text", + "content": "Figure 42: Task: Camera control. The goal is to generate images aligned with specific photographic parameters, such as bokeh blur, focal length, shutter speed, and color temperature. Setup: Results are based on text prompts collected from [118], comparing outputs from GPT-4o, Gemini 2.0 Flash [99], and Generative Photography (GP) [118]. Each row includes the input text instructions and corresponding outputs. Observations: GPT-4o demonstrates strong performance in controlling color temperature, producing coherent and visually accurate results. However, it struggles with shutter speed, occasionally resulting in inconsistent or unrealistic motion effects. In contrast, Gemini 2.0 Flash fails to consistently handle either parameter, often producing outputs that lack alignment with the desired settings. Overall, GPT-4o outperforms Gemini 2.0 Flash in this task, but further improvements are needed for precise shutter speed control." 
+ } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 433, + 462, + 506, + 511 + ], + "blocks": [ + { + "bbox": [ + 433, + 462, + 506, + 511 + ], + "lines": [ + { + "bbox": [ + 433, + 462, + 506, + 511 + ], + "spans": [ + { + "bbox": [ + 433, + 462, + 506, + 511 + ], + "type": "image", + "image_path": "a99706fb05eb2a9239f7691575789b91486fcd20c7175b2b5dd9b1fbd088f850.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 454, + 515, + 483, + 525 + ], + "lines": [ + { + "bbox": [ + 454, + 515, + 483, + 525 + ], + "spans": [ + { + "bbox": [ + 454, + 515, + 483, + 525 + ], + "type": "text", + "content": "3000.0" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + } + ], + "index": 47 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "55" + } + ] + } + ], + "index": 51 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 54 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 233, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 233, + 85 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 233, + 85 + ], + "type": "text", + "content": "2.2.8 In-context Visual Prompting" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 533, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 533, + 169 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 533, + 169 + ], + "type": "text", + "content": "The in-context visual prompting tasks aim at understanding and executing specific tasks on new query images by leveraging a pair of task-specific example images and accompanying text instructions. Previous works [105, 18, 52] have explored this capability in the context of diffusion and autoregressive models, demonstrating its potential in enhancing model adaptability. The significance of in-context visual prompting lies in its ability to enable models to generalize to novel tasks. This approach mirrors human-like learning, where new tasks can be understood and performed by observing relevant examples. This capability has broad implications across various domains, and paves the way for more flexible and efficient paradigms capable of adapting to a wide range of specific tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 173, + 533, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 173, + 533, + 207 + ], + "spans": [ + { + "bbox": [ + 77, + 173, + 533, + 207 + ], + "type": "text", + "content": "We curate four representative tasks to evaluate the performance of GPT-4o in in-context visual prompting. 
These tasks are designed to assess the model's ability to understand and adapt to specific visual tasks based on provided examples and guidance, including:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 215, + 530, + 326 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 105, + 215, + 530, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 215, + 530, + 237 + ], + "spans": [ + { + "bbox": [ + 105, + 215, + 530, + 237 + ], + "type": "text", + "content": "- Movie-Shot Generation: A three-shot image collected from [42] is provided as an example, and the model is instructed to follow this format to generate similar movie shots for the query image." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 241, + 530, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 241, + 530, + 263 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 530, + 263 + ], + "type": "text", + "content": "- Ray-Tracing Rendering: An example gaming scene is provided with and without ray tracing, and the model is expected to render a ray-traced version of the query image." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 266, + 530, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 266, + 530, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 530, + 300 + ], + "type": "text", + "content": "- Overlaid Mask Visualization: The model receives an original image accompanied by its corresponding segmented results from [49] and is tasked with outputting the segmented results in the same format for the query image." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 304, + 530, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 530, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 530, + 326 + ], + "type": "text", + "content": "- Maze Solving: A maze and its corresponding solution path are provided as examples, and the model is required to draw the solution path for a new maze presented in the query image." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 77, + 335, + 533, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 335, + 533, + 434 + ], + "spans": [ + { + "bbox": [ + 77, + 335, + 533, + 434 + ], + "type": "text", + "content": "All the results are illustrated in Figure 43. Compared with Gemini 2.0 Flash [99], GPT-4o demonstrates promising performance in movie-shot generation and ray-tracing rendering tasks, showcasing its ability to follow example formats and generate visually coherent outputs. However, it still struggles with maintaining consistent visual semantics across the generated outputs. For the overlaid mask visualization task, GPT-4o falls short in effectively executing the instructions. The result fails to adhere to the required format, indicating that the model's ability to process and generate complex outputs remains limited. For maze solving, a task that demands advanced visual reasoning and logical inference, GPT-4o struggles significantly. This highlights the challenges in combining higher-level reasoning with visual generation capabilities, suggesting that more sophisticated reasoning mechanisms are needed for tasks of this nature." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 77, + 438, + 533, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 438, + 533, + 472 + ], + "spans": [ + { + "bbox": [ + 77, + 438, + 533, + 472 + ], + "type": "text", + "content": "In summary, GPT-4o shows considerable potential in in-context visual prompting, while it still underperforms in certain difficult tasks. These observations suggest that further advancements are necessary to enhance its generation and reasoning capabilities for more complex and diverse visual tasks." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "56" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 55 + }, + { + "para_blocks": [ + { + "bbox": [ + 123, + 89, + 243, + 101 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 89, + 243, + 101 + ], + "spans": [ + { + "bbox": [ + 123, + 89, + 243, + 101 + ], + "type": "text", + "content": "In-Context Visual Prompting" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 118, + 109, + 130, + 122 + ], + "blocks": [ + { + "bbox": [ + 118, + 109, + 130, + 122 + ], + "lines": [ + { + "bbox": [ + 118, + 109, + 130, + 122 + ], + "spans": [ + { + "bbox": [ + 118, + 109, + 130, + 122 + ], + "type": "image", + "image_path": "751b8b0b723ad8d89ba3583fc6932d850eb7cf8a76f7215ed994d255821fb63e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 112, + 498, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 112, + 498, + 125 + ], + "spans": [ + { + "bbox": [ + 132, + 112, + 498, + 125 + ], + "type": "text", + "content": "Evaluation: Understanding and executing specific tasks with example images." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 120, + 129, + 169, + 194 + ], + "blocks": [ + { + "bbox": [ + 120, + 129, + 169, + 194 + ], + "lines": [ + { + "bbox": [ + 120, + 129, + 169, + 194 + ], + "spans": [ + { + "bbox": [ + 120, + 129, + 169, + 194 + ], + "type": "image", + "image_path": "7e5a6dca5061982b8b4f23f693eb84c50a375b77e068c461c42e68104c157985.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 170, + 129, + 269, + 194 + ], + "blocks": [ + { + "bbox": [ + 170, + 129, + 269, + 194 + ], + "lines": [ + { + "bbox": [ + 170, + 129, + 269, + 194 + ], + "spans": [ + { + "bbox": [ + 170, + 129, + 269, + 194 + ], + "type": "image", + "image_path": "70e1fc27eedb02031d28c9885667468714afce1b37646dd33a83ef114dce4e78.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 269, + 130, + 313, + 195 + ], + "blocks": [ + { + "bbox": [ + 269, + 130, + 313, + 195 + ], + "lines": [ + { + "bbox": [ + 269, + 130, + 313, + 195 + ], + "spans": [ + { + "bbox": [ + 269, + 130, + 313, + 195 + ], + "type": "image", + "image_path": "5f022a9d8700e671b60295a4ae5dd57ff7c7747fa42c0d5b9d6fb09f4fe1b014.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 313, + 130, + 495, + 195 + ], + "blocks": [ + { + "bbox": [ + 313, + 130, + 495, + 195 + ], + "lines": [ + { + "bbox": [ + 313, + 130, + 495, + 195 + ], + "spans": [ + { + "bbox": [ + 313, + 130, + 495, + 195 + ], + "type": "image", + "image_path": "49f93c593df590166d47657e6c0eda2ed7ef62cdd094e685de672efaec343681.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 119, + 197, + 481, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 197, + 481, + 219 + ], + "spans": [ + { + "bbox": [ + 119, + 197, + 481, + 219 + ], + "type": "text", + "content": "Input Text: \"The first image contains three movie shots. 
Please imitate this image and create the subsequent movie shots for the second image.\"" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 223, + 168, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 223, + 168, + 233 + ], + "spans": [ + { + "bbox": [ + 121, + 223, + 168, + 233 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 196, + 223, + 242, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 223, + 242, + 233 + ], + "spans": [ + { + "bbox": [ + 196, + 223, + 242, + 233 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 278, + 223, + 305, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 278, + 223, + 305, + 232 + ], + "spans": [ + { + "bbox": [ + 278, + 223, + 305, + 232 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 375, + 224, + 435, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 224, + 435, + 233 + ], + "spans": [ + { + "bbox": [ + 375, + 224, + 435, + 233 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 157, + 239, + 432, + 294 + ], + "blocks": [ + { + "bbox": [ + 157, + 239, + 432, + 294 + ], + "lines": [ + { + "bbox": [ + 157, + 239, + 432, + 294 + ], + "spans": [ + { + "bbox": [ + 157, + 239, + 432, + 294 + ], + "type": "image", + "image_path": "e63aaf3e7583e329733bdf0d0086b196c266732e65fcc124b54b13a2ae6da734.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 194, + 309, + 400, + 365 + ], + "blocks": [ + { + "bbox": [ + 230, + 298, + 276, + 308 + ], + "lines": [ + { + "bbox": [ + 230, + 298, + 276, + 308 + ], + "spans": [ + { + "bbox": [ + 230, + 298, + 276, + 308 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 194, + 309, + 400, + 365 + ], + "lines": [ + { + "bbox": [ + 194, + 309, + 400, + 365 + ], + "spans": [ + { + "bbox": [ + 194, + 309, + 400, + 365 + ], + "type": "image", + "image_path": "05f1f17e9903bfc312375e98f666c3fae0cca18b6d2b7c60fe1605e04fefa3d0.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 368, + 271, + 377 + ], + "lines": [ + { + "bbox": [ + 225, + 368, + 271, + 377 + ], + "spans": [ + { + "bbox": [ + 225, + 368, + 271, + 377 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 320, + 369, + 380, + 378 + ], + "lines": [ + { + "bbox": [ + 320, + 369, + 380, + 378 + ], + "spans": [ + { + "bbox": [ + 320, + 369, + 380, + 378 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 379, + 499, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 379, + 499, + 412 + ], + "spans": [ + { + "bbox": [ + 111, + 379, + 499, + 412 + ], + "type": "text", + "content": "Input Text: \"The first image includes an original gaming scene, and the scene enhanced with ray tracing. 
Please imitate this image and create the scene enhanced with ray tracing for the second image.\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 115, + 416, + 264, + 469 + ], + "blocks": [ + { + "bbox": [ + 115, + 416, + 264, + 469 + ], + "lines": [ + { + "bbox": [ + 115, + 416, + 264, + 469 + ], + "spans": [ + { + "bbox": [ + 115, + 416, + 264, + 469 + ], + "type": "image", + "image_path": "27c5eea52e6ca0c737ae2cf46a953e11f8a44064e52b132ad803df249ab60e5c.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 167, + 498, + 213, + 507 + ], + "lines": [ + { + "bbox": [ + 167, + 498, + 213, + 507 + ], + "spans": [ + { + "bbox": [ + 167, + 498, + 213, + 507 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 265, + 416, + 338, + 468 + ], + "blocks": [ + { + "bbox": [ + 265, + 416, + 338, + 468 + ], + "lines": [ + { + "bbox": [ + 265, + 416, + 338, + 468 + ], + "spans": [ + { + "bbox": [ + 265, + 416, + 338, + 468 + ], + "type": "image", + "image_path": "0755edc88c21d2311018ccff73e7808905f504f9fd16b4b534f5cee1942b18b2.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 341, + 416, + 416, + 469 + ], + "blocks": [ + { + "bbox": [ + 341, + 416, + 416, + 469 + ], + "lines": [ + { + "bbox": [ + 341, + 416, + 416, + 469 + ], + "spans": [ + { + "bbox": [ + 341, + 416, + 416, + 469 + ], + "type": "image", + "image_path": "2c63f34552d2fef8c8e86f7dd76a62315f4c1dd4bcb26b2198e0e7acd23d6ef7.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 417, + 416, + 493, + 469 + ], + "blocks": [ + { + "bbox": [ + 417, + 416, + 493, + 469 + ], + "lines": [ + { + "bbox": [ + 417, + 416, + 493, + 469 + ], + "spans": [ + { + "bbox": [ + 417, + 416, + 493, + 469 + ], + "type": "image", + "image_path": "1fb2c646d1d34c3087486d5485c780d38c7e518b41b9210914f00d583ccfd2b1.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 111, + 471, + 504, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 471, + 504, + 494 + ], + "spans": [ + { + "bbox": [ + 111, + 471, + 504, + 494 + ], + "type": "text", + "content": "Input Text: \"The first image shows an original image and its segmented results. 
Please imitate this image and output the segmented results in the same format for the second image.\"" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 125, + 523, + 169, + 567 + ], + "blocks": [ + { + "bbox": [ + 125, + 523, + 169, + 567 + ], + "lines": [ + { + "bbox": [ + 125, + 523, + 169, + 567 + ], + "spans": [ + { + "bbox": [ + 125, + 523, + 169, + 567 + ], + "type": "image", + "image_path": "4702bc6682345e1ecd0d870da7f607f3851d574f86f2af27b69b7ce8feb8d7b3.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 179, + 523, + 221, + 568 + ], + "blocks": [ + { + "bbox": [ + 179, + 523, + 221, + 568 + ], + "lines": [ + { + "bbox": [ + 179, + 523, + 221, + 568 + ], + "spans": [ + { + "bbox": [ + 179, + 523, + 221, + 568 + ], + "type": "image", + "image_path": "4cefd3a472618bf4ed1b900b8f0253d8ca072ad60e16e238d601280a1ff4ac20.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 239, + 519, + 291, + 571 + ], + "blocks": [ + { + "bbox": [ + 280, + 498, + 325, + 507 + ], + "lines": [ + { + "bbox": [ + 280, + 498, + 325, + 507 + ], + "spans": [ + { + "bbox": [ + 280, + 498, + 325, + 507 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 239, + 519, + 291, + 571 + ], + "lines": [ + { + "bbox": [ + 239, + 519, + 291, + 571 + ], + "spans": [ + { + "bbox": [ + 239, + 519, + 291, + 571 + ], + "type": "image", + "image_path": "a05ae86c57f255f29915d6f1ca7fbca481e453d5260a2ee80c1df5f1a84c31e1.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 301, + 519, + 351, + 570 + ], + "blocks": [ + { + "bbox": [ + 365, + 498, + 391, + 506 + ], + "lines": [ + { + "bbox": [ + 365, + 498, + 391, + 506 + ], + "spans": [ + { + "bbox": [ + 365, + 498, + 391, + 506 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 301, + 519, + 351, + 570 + ], + "lines": [ + { + "bbox": [ + 301, + 519, + 351, + 570 + ], + "spans": [ + { + "bbox": [ + 301, + 519, + 351, + 570 + ], + "type": "image", + "image_path": "fe38be0bc78d431b48a8e0538e8cea074d5ce5e421511c2e081ea26e6ad8173c.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 367, + 527, + 412, + 568 + ], + "blocks": [ + { + "bbox": [ + 367, + 527, + 412, + 568 + ], + "lines": [ + { + "bbox": [ + 367, + 527, + 412, + 568 + ], + "spans": [ + { + "bbox": [ + 367, + 527, + 412, + 568 + ], + "type": "image", + "image_path": "acc8a88501794d5cb5abb73802e25f714233bc33723c8874cf9c6c27f2a19f7d.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 422, + 527, + 466, + 568 + ], + "blocks": [ + { + "bbox": [ + 424, + 498, + 484, + 506 + ], + "lines": [ + { + "bbox": [ + 424, + 498, + 484, + 506 + ], + "spans": [ + { + "bbox": [ + 424, + 498, + 484, + 506 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 422, + 527, + 466, + 568 + ], + "lines": [ + { + "bbox": [ + 422, + 527, + 466, + 568 + ], + "spans": [ + { + "bbox": [ + 422, + 527, + 466, + 568 + ], + "type": "image", 
+ "image_path": "2a04299128063c19b3d18cf2c84c9a40f476b3c8ef92b747667e18981dce0475.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 578, + 497, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 578, + 497, + 601 + ], + "spans": [ + { + "bbox": [ + 113, + 578, + 497, + 601 + ], + "type": "text", + "content": "Input Text: \"The first image displays an unsolved maze and the maze with a solution path in red. Please imitate this image and identify the solution path for the second image.\"" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 148, + 606, + 195, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 606, + 195, + 616 + ], + "spans": [ + { + "bbox": [ + 148, + 606, + 195, + 616 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 238, + 606, + 284, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 606, + 284, + 616 + ], + "spans": [ + { + "bbox": [ + 238, + 606, + 284, + 616 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 312, + 606, + 338, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 606, + 338, + 615 + ], + "spans": [ + { + "bbox": [ + 312, + 606, + 338, + 615 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 386, + 606, + 445, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 606, + 445, + 615 + ], + "spans": [ + { + "bbox": [ + 386, + 606, + 445, + 615 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 77, + 630, + 533, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 630, + 533, + 708 + ], + "spans": [ + { + "bbox": [ + 77, + 630, + 533, + 708 + ], + "type": "text", + "content": "Figure 43: Task: In-context visual prompting. The goal is to perform specific visual tasks on new query images based on task-specific example images and text instructions. Setup: Four representative tasks are evaluated: movie-shot generation, ray-tracing rendering, overlaid mask visualization, and maze solving. Each row includes example images, query images, and the corresponding outputs. Observations: GPT-4o excels in movie-shot generation and ray-tracing, producing coherent outputs but lacks consistency in visual semantics. It fails with overlaid mask visualization and maze solving, showing limits in complex task integration. While promising for in-context visual prompting, it needs refinement for more complex and reasoning-intensive tasks." 
+ } + ] + } + ], + "index": 38 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "57" + } + ] + } + ], + "index": 39 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 56 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 185, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 185, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 185, + 83 + ], + "type": "text", + "content": "2.3 Image-to-3D Tasks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 92, + 533, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 92, + 533, + 116 + ], + "spans": [ + { + "bbox": [ + 77, + 92, + 533, + 116 + ], + "type": "text", + "content": "We evaluate the 3D understanding capabilities from 2D images of GPT-4o across three tasks: 2D image-to-3D modeling, 2D UV map-to-3D rendering, and novel view synthesis." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 78, + 126, + 206, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 126, + 206, + 138 + ], + "spans": [ + { + "bbox": [ + 78, + 126, + 206, + 138 + ], + "type": "text", + "content": "2.3.1 Image to 3D modeling" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 145, + 533, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 145, + 533, + 213 + ], + "spans": [ + { + "bbox": [ + 77, + 145, + 533, + 213 + ], + "type": "text", + "content": "Generating 3D models from monocular images boosts a wide range of applications, including augmented reality, virtual reality, and the gaming industry. This capability not only facilitates the content creation process but also mitigates the reliance on specialized 3D artists for creating 3D assets, which is more time- and cost-effective. Therefore, there is a growing research interest in generating 3D models from 2D images. Early methods on image-to-3D employ the learning-based approaches for single-view reconstruction [74, 77, 102, 79]. Recent works leverage the diffusion model prior to perform image-conditioned 3D generative modeling [69, 68, 83, 113]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 216, + 533, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 216, + 533, + 338 + ], + "spans": [ + { + "bbox": [ + 77, + 216, + 533, + 338 + ], + "type": "text", + "content": "In this section, we investigate the potential of GPT-4o for 3D modeling from 2D images. We begin by prompting GPT-4o to generate a Cinema 4D modeling interface to test its ability to produce coherent representations of structure, material, and wireframe based on the input image. As shown in Figure 44, GPT-4o can generate high-quality 3D model renderings within the application interface. Notably, the generated models exhibit clear wireframes and textures consistent with the input images. In contrast, Gemini 2.0 Flash and Midjourney v6.1 fail to achieve comparable results under the same conditions, which produce inconsistent renderings. We then prompt the GPT-4o to generate corresponding 3D object and material files in .obj and .mtl formats to further evaluate its understanding of the underlying structure in the rendered images. 
However, the output 3D models are coarse and inconsistent with the input images, indicating that although GPT-4o can produce visually coherent 3D renderings, its capability to transform these into accurate and usable 3D object files remains limited. Additionally, Gemini 2.0 Flash and Midjourney v6.1 do not support exporting 3D models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 78, + 348, + 218, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 348, + 218, + 361 + ], + "spans": [ + { + "bbox": [ + 78, + 348, + 218, + 361 + ], + "type": "text", + "content": "2.3.2 UV Map to 3D rendering" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 77, + 366, + 532, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 366, + 532, + 433 + ], + "spans": [ + { + "bbox": [ + 77, + 366, + 532, + 433 + ], + "type": "text", + "content": "UV maps are 2D images that store texture information for 3D models. In 3D modeling, geometric data is represented in 3D space, while texture data is defined in a 2D texture space. UV mapping is the process of projecting a 2D UV map onto a 3D model, accurately aligning texture with geometry. The UV mapping task can therefore probe a model's capability for 3D perception and spatial understanding. Moreover, this task has broad applications in design, reducing the burden on designers of manually creating product renderings from 2D maps and providing useful references." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 77, + 437, + 532, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 437, + 532, + 494 + ], + "spans": [ + { + "bbox": [ + 77, + 437, + 532, + 494 + ], + "type": "text", + "content": "As shown in Figure 45, GPT-4o exhibits a superior ability to generate consistent 3D renderings from 2D maps compared to Gemini 2.0 Flash and Midjourney v6.1. However, some outputs remain unsatisfactory, displaying inconsistencies in patterns and structure (see row 3 in Figure 45). Gemini 2.0 Flash struggles to correctly wrap the 3D model, though it maintains pattern consistency. Midjourney v6.1 tends to introduce additional, imagined features, which reduce controllability in this task." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 78, + 504, + 202, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 504, + 202, + 516 + ], + "spans": [ + { + "bbox": [ + 78, + 504, + 202, + 516 + ], + "type": "text", + "content": "2.3.3 Novel View Synthesis" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 77, + 523, + 533, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 523, + 533, + 601 + ], + "spans": [ + { + "bbox": [ + 77, + 523, + 533, + 601 + ], + "type": "text", + "content": "From a monocular view, humans can imagine an object's 3D shape and appearance, since they have accumulated rich prior knowledge of different objects throughout their daily lives. This ability to infer novel views of objects is essential for a wide range of tasks, from object manipulation to artistic creation such as painting. Early works achieve image-to-3D reconstruction using category-specific priors or large-scale pre-training [45, 80, 87, 32, 131]. Recent studies have shown that large diffusion models contain rich 3D prior information about the visual world, enabling them to perform novel view synthesis [69, 68, 83, 70]. These novel views can then be used for zero-shot 3D reconstruction using different 3D representations such as NeRF [76], mesh, or SDF."
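Editor's aside, not part of the extracted file: the passage above names the .obj/.mtl pairing and UV maps, both of which are small plain-text formats, so a concrete instance makes the task definitions easy to check. Below is a minimal Python sketch that writes a single textured quad; the file names quad.obj and uv_map.png and the material name quad_material are hypothetical placeholders, not artifacts referenced by the paper.

# Illustrative sketch only: emit a minimal .obj/.mtl pair. The "vt" lines
# hold the 2D UV coordinates that UV mapping projects onto the 3D geometry;
# each face index pairs a 3D vertex with its UV coordinate as v/vt.
from pathlib import Path

def write_textured_quad(obj_path: str = "quad.obj", texture: str = "uv_map.png") -> None:
    mtl_path = Path(obj_path).with_suffix(".mtl")
    # .mtl: one material whose diffuse texture is the UV map image.
    mtl_path.write_text(
        "newmtl quad_material\n"
        "Kd 1.0 1.0 1.0\n"
        f"map_Kd {texture}\n"
    )
    # .obj: 3D vertices (v), 2D texture coordinates (vt), one quad face (f).
    Path(obj_path).write_text(
        f"mtllib {mtl_path.name}\n"
        "usemtl quad_material\n"
        "v 0.0 0.0 0.0\n"
        "v 1.0 0.0 0.0\n"
        "v 1.0 1.0 0.0\n"
        "v 0.0 1.0 0.0\n"
        "vt 0.0 0.0\n"
        "vt 1.0 0.0\n"
        "vt 1.0 1.0\n"
        "vt 0.0 1.0\n"
        "f 1/1 2/2 3/3 4/4\n"
    )

if __name__ == "__main__":
    write_textured_quad()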
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 77, + 605, + 533, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 605, + 533, + 673 + ], + "spans": [ + { + "bbox": [ + 77, + 605, + 533, + 673 + ], + "type": "text", + "content": "In this section, we evaluate the ability of GPT-4o for novel view synthesis on objects with artistic styles and asymmetric geometry. As shown in Figure 46, for artistically styled objects, GPT-4o and Gemini 2.0 Flash largely preserve structural consistency with the input image, although they may change some elements or fine details. For the asymmetric object, GPT-4o can preserve the object scale and size better than Gemini 2.0 Flash. However, Midjourney v6.1 fails to generate consistent novel views, instead producing visually appealing images that do not align with the given prompt of this task." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "58" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 57 + }, + { + "para_blocks": [ + { + "bbox": [ + 100, + 135, + 195, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 135, + 195, + 148 + ], + "spans": [ + { + "bbox": [ + 100, + 135, + 195, + 148 + ], + "type": "text", + "content": "Image to 3D Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 155, + 160, + 470, + 175 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 160, + 470, + 175 + ], + "spans": [ + { + "bbox": [ + 155, + 160, + 470, + 175 + ], + "type": "text", + "content": "Evaluation: Shape/texture consistency, wireframe plausibility." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 100, + 179, + 197, + 275 + ], + "blocks": [ + { + "bbox": [ + 100, + 179, + 197, + 275 + ], + "lines": [ + { + "bbox": [ + 100, + 179, + 197, + 275 + ], + "spans": [ + { + "bbox": [ + 100, + 179, + 197, + 275 + ], + "type": "image", + "image_path": "1f9b0b011e88c337096f493dbdc16bbafefb87c7ac6d0817b51a02d65ae211a2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 204, + 179, + 301, + 275 + ], + "blocks": [ + { + "bbox": [ + 204, + 179, + 301, + 275 + ], + "lines": [ + { + "bbox": [ + 204, + 179, + 301, + 275 + ], + "spans": [ + { + "bbox": [ + 204, + 179, + 301, + 275 + ], + "type": "image", + "image_path": "20925173299f15c6d10bdcdcb9ec379e65b18d75af2a57d652be5a8ff0b8001d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 179, + 406, + 275 + ], + "blocks": [ + { + "bbox": [ + 309, + 179, + 406, + 275 + ], + "lines": [ + { + "bbox": [ + 309, + 179, + 406, + 275 + ], + "spans": [ + { + "bbox": [ + 309, + 179, + 406, + 275 + ], + "type": "image", + "image_path": "afb58fab9b72f3bf1b78d415b4d5e8ca817660113f2b9a1da7dee7153ca97330.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 413, + 179, + 510, + 275 + ], + "blocks": [ + { + "bbox": [ + 413, + 179, + 510, + 275 + ], + "lines": [ + { + "bbox": [ + 413, + 179, + 510, + 275 + ], + "spans": [ + { + "bbox": [ + 413, + 179, + 510, + 275 + ], + "type": "image", + "image_path": "1b8601f013b93c27baf25e41c09132b903155d83d0ba6b9bdcd83e2b33d24788.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 100, + 276, + 197, + 372 + ], + "blocks": [ + { + "bbox": [ + 100, + 276, + 197, + 372 + ], + "lines": [ + { + "bbox": [ + 100, + 276, + 197, + 372 + ], + "spans": [ + { + "bbox": [ + 100, + 276, + 197, + 372 + ], + "type": "image", + "image_path": "7add957f9d7531f536cfef81731bc6b28f75e12995ba9dbfe3e545658a323c7d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 205, + 276, + 301, + 372 + ], + "blocks": [ + { + "bbox": [ + 205, + 276, + 301, + 372 + ], + "lines": [ + { + "bbox": [ + 205, + 276, + 301, + 372 + ], + "spans": [ + { + "bbox": [ + 205, + 276, + 301, + 372 + ], + "type": "image", + "image_path": "8d55ee618dd53e3a5e154598b93d67afc79a4d7bf139ce03e31d740af22b29f0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 309, + 276, + 406, + 372 + ], + "blocks": [ + { + "bbox": [ + 309, + 276, + 406, + 372 + ], + "lines": [ + { + "bbox": [ + 309, + 276, + 406, + 372 + ], + "spans": [ + { + "bbox": [ + 309, + 276, + 406, + 372 + ], + "type": "image", + "image_path": "9149d06a0cd845a92028c8f8199da8213692d3c85666f692dd350f78144af753.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 413, + 276, + 510, + 372 + ], + "blocks": [ + { + "bbox": [ + 413, + 276, + 510, + 372 + ], + "lines": [ + { + "bbox": [ + 413, + 276, + 510, + 372 + ], + "spans": [ + { + "bbox": [ + 413, + 276, + 510, + 372 + ], + "type": "image", + "image_path": 
"666712e651824dc36d467ea97f632d637319b03414823161f4a04e13a521f767.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 101, + 373, + 197, + 469 + ], + "blocks": [ + { + "bbox": [ + 101, + 373, + 197, + 469 + ], + "lines": [ + { + "bbox": [ + 101, + 373, + 197, + 469 + ], + "spans": [ + { + "bbox": [ + 101, + 373, + 197, + 469 + ], + "type": "image", + "image_path": "c46b403e7ac2e945512ffa6d309f1e6f20a355598593e3758059233cf042e2e6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 205, + 373, + 301, + 469 + ], + "blocks": [ + { + "bbox": [ + 205, + 373, + 301, + 469 + ], + "lines": [ + { + "bbox": [ + 205, + 373, + 301, + 469 + ], + "spans": [ + { + "bbox": [ + 205, + 373, + 301, + 469 + ], + "type": "image", + "image_path": "b15793d17ca7c80bf1b909d279a47bdf5b3a139165326ca3c1dffd011f7b0722.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 309, + 373, + 406, + 468 + ], + "blocks": [ + { + "bbox": [ + 309, + 373, + 406, + 468 + ], + "lines": [ + { + "bbox": [ + 309, + 373, + 406, + 468 + ], + "spans": [ + { + "bbox": [ + 309, + 373, + 406, + 468 + ], + "type": "image", + "image_path": "875d4fca774409cc984e2951d3b6c99aa12608a7a78632a41fcd84f6bd223082.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 413, + 373, + 511, + 468 + ], + "blocks": [ + { + "bbox": [ + 413, + 373, + 511, + 468 + ], + "lines": [ + { + "bbox": [ + 413, + 373, + 511, + 468 + ], + "spans": [ + { + "bbox": [ + 413, + 373, + 511, + 468 + ], + "type": "image", + "image_path": "1a0ac4c5dee91e1501338bfcaaf66863752c28b3f6438eb24488b2e78087a942.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 100, + 473, + 198, + 570 + ], + "blocks": [ + { + "bbox": [ + 100, + 473, + 198, + 570 + ], + "lines": [ + { + "bbox": [ + 100, + 473, + 198, + 570 + ], + "spans": [ + { + "bbox": [ + 100, + 473, + 198, + 570 + ], + "type": "image", + "image_path": "f5a97061e0c642e8ad66652a68547da7ce296666b9869a78d2e0233544c9a488.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 620, + 533, + 666 + ], + "lines": [ + { + "bbox": [ + 77, + 620, + 533, + 666 + ], + "spans": [ + { + "bbox": [ + 77, + 620, + 533, + 666 + ], + "type": "text", + "content": "Figure 44: Task: Image-to-3D model rendering. Evaluate the 3D modeling ability given a 2D image. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better 3D model rendering with consistent shape, texture, and plausible wireframe than Gemini 2.0 Flash and Midjourney v6.1." 
+ } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 205, + 473, + 302, + 570 + ], + "blocks": [ + { + "bbox": [ + 205, + 473, + 302, + 570 + ], + "lines": [ + { + "bbox": [ + 205, + 473, + 302, + 570 + ], + "spans": [ + { + "bbox": [ + 205, + 473, + 302, + 570 + ], + "type": "image", + "image_path": "a11e7e5a889c46ee27cc17d0f091d77cb1c167e6c4314649ed219089b6bfe94a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 309, + 473, + 407, + 570 + ], + "blocks": [ + { + "bbox": [ + 309, + 473, + 407, + 570 + ], + "lines": [ + { + "bbox": [ + 309, + 473, + 407, + 570 + ], + "spans": [ + { + "bbox": [ + 309, + 473, + 407, + 570 + ], + "type": "image", + "image_path": "4b00229ed17a9f68b0777d94b5bc1f08385f38b87d3759265475e7b14d5c6df0.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 413, + 473, + 510, + 569 + ], + "blocks": [ + { + "bbox": [ + 413, + 473, + 510, + 569 + ], + "lines": [ + { + "bbox": [ + 413, + 473, + 510, + 569 + ], + "spans": [ + { + "bbox": [ + 413, + 473, + 510, + 569 + ], + "type": "image", + "image_path": "67c90170597a34e84c1311ea2c3f363b473991486ba6bf0a15957d95db598921.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 91, + 578, + 523, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 578, + 523, + 590 + ], + "spans": [ + { + "bbox": [ + 91, + 578, + 523, + 590 + ], + "type": "text", + "content": "Input Text: \"Generate a pre-render view of a C4D model, including the UI, wireframe and material.\"" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 116, + 596, + 172, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 596, + 172, + 608 + ], + "spans": [ + { + "bbox": [ + 116, + 596, + 172, + 608 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 235, + 595, + 267, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 595, + 267, + 605 + ], + "spans": [ + { + "bbox": [ + 235, + 595, + 267, + 605 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 321, + 595, + 392, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 595, + 392, + 605 + ], + "spans": [ + { + "bbox": [ + 321, + 595, + 392, + 605 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 427, + 594, + 496, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 427, + 594, + 496, + 606 + ], + "spans": [ + { + "bbox": [ + 427, + 594, + 496, + 606 + ], + "type": "text", + "content": "Midjourney v6.1" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "59" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 58 + }, + { + "para_blocks": [ + { + "bbox": [ + 99, + 126, + 230, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 126, + 230, + 140 + ], + "spans": [ + { + "bbox": [ + 99, + 126, + 230, + 140 
+ ], + "type": "text", + "content": "2D UV map to 3D rendering" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 203, + 153, + 217, + 167 + ], + "blocks": [ + { + "bbox": [ + 203, + 153, + 217, + 167 + ], + "lines": [ + { + "bbox": [ + 203, + 153, + 217, + 167 + ], + "spans": [ + { + "bbox": [ + 203, + 153, + 217, + 167 + ], + "type": "image", + "image_path": "5df66609cbc1920bfd7bfab7731dee320fbf9bd64ef127bfe114205f6384aaa1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 217, + 156, + 422, + 170 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 156, + 422, + 170 + ], + "spans": [ + { + "bbox": [ + 217, + 156, + 422, + 170 + ], + "type": "text", + "content": "Evaluation: Structure/pattern consistency." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 91, + 174, + 216, + 269 + ], + "blocks": [ + { + "bbox": [ + 91, + 174, + 216, + 269 + ], + "lines": [ + { + "bbox": [ + 91, + 174, + 216, + 269 + ], + "spans": [ + { + "bbox": [ + 91, + 174, + 216, + 269 + ], + "type": "image", + "image_path": "9410af00d870c0c0323ba5f3b9c30e57a1d3b4116a79dda9d22fbb7f6ef86c7f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 220, + 174, + 319, + 270 + ], + "blocks": [ + { + "bbox": [ + 220, + 174, + 319, + 270 + ], + "lines": [ + { + "bbox": [ + 220, + 174, + 319, + 270 + ], + "spans": [ + { + "bbox": [ + 220, + 174, + 319, + 270 + ], + "type": "image", + "image_path": "0673e0b100dc90c3154a0f84f46af572d02b7c0ce4f3ef16fade43705be3a169.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 323, + 174, + 418, + 270 + ], + "blocks": [ + { + "bbox": [ + 323, + 174, + 418, + 270 + ], + "lines": [ + { + "bbox": [ + 323, + 174, + 418, + 270 + ], + "spans": [ + { + "bbox": [ + 323, + 174, + 418, + 270 + ], + "type": "image", + "image_path": "39a867d5c76eb0ae5b81b696fa69ab0660b5b5250864e6b56ff6616c1d01cf18.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 421, + 174, + 518, + 270 + ], + "blocks": [ + { + "bbox": [ + 421, + 174, + 518, + 270 + ], + "lines": [ + { + "bbox": [ + 421, + 174, + 518, + 270 + ], + "spans": [ + { + "bbox": [ + 421, + 174, + 518, + 270 + ], + "type": "image", + "image_path": "f00108f1137a655c41931a3eca3d59fb3d37c83f487aa21775f7ef258746a7e5.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 91, + 272, + 214, + 369 + ], + "blocks": [ + { + "bbox": [ + 91, + 272, + 214, + 369 + ], + "lines": [ + { + "bbox": [ + 91, + 272, + 214, + 369 + ], + "spans": [ + { + "bbox": [ + 91, + 272, + 214, + 369 + ], + "type": "image", + "image_path": "c720d7639945658d54f942ed378b4e165f730b8de7a268e21417078ae525472e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 221, + 272, + 319, + 369 + ], + "blocks": [ + { + "bbox": [ + 221, + 272, + 319, + 369 + ], + "lines": [ + { + "bbox": [ + 221, + 272, + 319, + 369 + ], + "spans": [ + { + "bbox": [ + 221, + 272, + 319, + 369 + ], + "type": "image", + "image_path": "50e1bcc4270a236adf25775994fc41829c4d6c762fc5cef6ca4d34dd425a43e5.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 8 + }, + { + "type": "image", + "bbox": [ + 323, + 272, + 419, + 369 + ], + "blocks": [ + { + "bbox": [ + 323, + 272, + 419, + 369 + ], + "lines": [ + { + "bbox": [ + 323, + 272, + 419, + 369 + ], + "spans": [ + { + "bbox": [ + 323, + 272, + 419, + 369 + ], + "type": "image", + "image_path": "34545e79e79f3078a365a6cb3d938519fac2d3ed1ebd3db57368a178779167ae.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 423, + 274, + 517, + 370 + ], + "blocks": [ + { + "bbox": [ + 423, + 274, + 517, + 370 + ], + "lines": [ + { + "bbox": [ + 423, + 274, + 517, + 370 + ], + "spans": [ + { + "bbox": [ + 423, + 274, + 517, + 370 + ], + "type": "image", + "image_path": "e914339e66bb3680ffe4e499cab448aa16a263e1ab23c54fac562df95422444c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 92, + 370, + 216, + 474 + ], + "blocks": [ + { + "bbox": [ + 92, + 370, + 216, + 474 + ], + "lines": [ + { + "bbox": [ + 92, + 370, + 216, + 474 + ], + "spans": [ + { + "bbox": [ + 92, + 370, + 216, + 474 + ], + "type": "image", + "image_path": "028da69b538ec977850baf21c5611c81d88bf976beeeeeccb70474beba17944b.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 221, + 371, + 319, + 474 + ], + "blocks": [ + { + "bbox": [ + 221, + 371, + 319, + 474 + ], + "lines": [ + { + "bbox": [ + 221, + 371, + 319, + 474 + ], + "spans": [ + { + "bbox": [ + 221, + 371, + 319, + 474 + ], + "type": "image", + "image_path": "915474b5475b9cfb5be03f5b44afa9a58d8047bd5530980a2d330a11b5d8294c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 323, + 372, + 419, + 474 + ], + "blocks": [ + { + "bbox": [ + 323, + 372, + 419, + 474 + ], + "lines": [ + { + "bbox": [ + 323, + 372, + 419, + 474 + ], + "spans": [ + { + "bbox": [ + 323, + 372, + 419, + 474 + ], + "type": "image", + "image_path": "391fb13f1c421b143115f7495cad2522d8d46310cd74bbe3fd2d5b33986486e1.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 630, + 533, + 675 + ], + "lines": [ + { + "bbox": [ + 77, + 630, + 533, + 675 + ], + "spans": [ + { + "bbox": [ + 77, + 630, + 533, + 675 + ], + "type": "text", + "content": "Figure 45: Task: 2D UV map to 3D rendering. Evaluate the 3D perception and spatial understanding ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better 3D renderings based on 2D maps than Gemini 2.0 Flash and Midjourney v6.1. However, structure and pattern inconsistencies still exist among these three models." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 421, + 372, + 518, + 474 + ], + "blocks": [ + { + "bbox": [ + 421, + 372, + 518, + 474 + ], + "lines": [ + { + "bbox": [ + 421, + 372, + 518, + 474 + ], + "spans": [ + { + "bbox": [ + 421, + 372, + 518, + 474 + ], + "type": "image", + "image_path": "54e8f1e07b35c7646e6a1992f478ea6768d117a77bc4a4fc4afcd51a7f5a0920.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 91, + 479, + 217, + 573 + ], + "blocks": [ + { + "bbox": [ + 91, + 479, + 217, + 573 + ], + "lines": [ + { + "bbox": [ + 91, + 479, + 217, + 573 + ], + "spans": [ + { + "bbox": [ + 91, + 479, + 217, + 573 + ], + "type": "image", + "image_path": "3aca5dc86dd3946c68be0172061baa23975fed2075ecacb889e265f89089a1e4.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 92, + 578, + 504, + 602 + ], + "lines": [ + { + "bbox": [ + 92, + 578, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 92, + 578, + 504, + 602 + ], + "type": "text", + "content": "Input Text: \"Assemble this packaging cutout into a complete product and output a 3D rendered image.\"" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 221, + 478, + 319, + 574 + ], + "blocks": [ + { + "bbox": [ + 221, + 478, + 319, + 574 + ], + "lines": [ + { + "bbox": [ + 221, + 478, + 319, + 574 + ], + "spans": [ + { + "bbox": [ + 221, + 478, + 319, + 574 + ], + "type": "image", + "image_path": "0a94fc94c0714aa10ea27a9a7909f7e8481229af8d64048c14d36241ab29679f.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 323, + 478, + 419, + 574 + ], + "blocks": [ + { + "bbox": [ + 323, + 478, + 419, + 574 + ], + "lines": [ + { + "bbox": [ + 323, + 478, + 419, + 574 + ], + "spans": [ + { + "bbox": [ + 323, + 478, + 419, + 574 + ], + "type": "image", + "image_path": "302a3887daa57a9516027cfc6473b7293f8f9c6bd1007fa32f38d00d07543191.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 422, + 478, + 518, + 574 + ], + "blocks": [ + { + "bbox": [ + 422, + 478, + 518, + 574 + ], + "lines": [ + { + "bbox": [ + 422, + 478, + 518, + 574 + ], + "spans": [ + { + "bbox": [ + 422, + 478, + 518, + 574 + ], + "type": "image", + "image_path": "887ddccf2929d6f4372e9362922daad36a1526e449412d7bfea8a7165bea64b9.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 121, + 605, + 177, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 605, + 177, + 616 + ], + "spans": [ + { + "bbox": [ + 121, + 605, + 177, + 616 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 241, + 604, + 274, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 604, + 274, + 614 + ], + "spans": [ + { + "bbox": [ + 241, + 604, + 274, + 614 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 329, + 604, + 401, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 604, + 401, + 615 + ], + "spans": [ + { + "bbox": [ + 329, + 604, + 401, + 615 + ], + "type": "text", + "content": "Gemini 2.0 
Flash" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 430, + 604, + 500, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 430, + 604, + 500, + 616 + ], + "spans": [ + { + "bbox": [ + 430, + 604, + 500, + 616 + ], + "type": "text", + "content": "Midjourney v6.1" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "60" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 59 + }, + { + "para_blocks": [ + { + "bbox": [ + 113, + 153, + 217, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 153, + 217, + 167 + ], + "spans": [ + { + "bbox": [ + 113, + 153, + 217, + 167 + ], + "type": "text", + "content": "Novel View Synthesis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 242, + 178, + 374, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 178, + 374, + 194 + ], + "spans": [ + { + "bbox": [ + 242, + 178, + 374, + 194 + ], + "type": "text", + "content": "Evaluation: Consistency." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 102, + 198, + 156, + 289 + ], + "blocks": [ + { + "bbox": [ + 102, + 198, + 156, + 289 + ], + "lines": [ + { + "bbox": [ + 102, + 198, + 156, + 289 + ], + "spans": [ + { + "bbox": [ + 102, + 198, + 156, + 289 + ], + "type": "image", + "image_path": "2fc6af7cdb8c9f7e71021c537aa0aba9a3610880f2762857e8317da88256e3c0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 160, + 198, + 279, + 289 + ], + "blocks": [ + { + "bbox": [ + 160, + 198, + 279, + 289 + ], + "lines": [ + { + "bbox": [ + 160, + 198, + 279, + 289 + ], + "spans": [ + { + "bbox": [ + 160, + 198, + 279, + 289 + ], + "type": "image", + "image_path": "e502bbf30709fbc0fd7fe24f3e4886279e719fcc412c016e104a0028ed894e18.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 281, + 199, + 394, + 289 + ], + "blocks": [ + { + "bbox": [ + 281, + 199, + 394, + 289 + ], + "lines": [ + { + "bbox": [ + 281, + 199, + 394, + 289 + ], + "spans": [ + { + "bbox": [ + 281, + 199, + 394, + 289 + ], + "type": "image", + "image_path": "a8a92b2a237654ea8a1e5c6eca6a9384ebacc2f436fc6dc6a34e87dd215b9d99.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 397, + 198, + 518, + 289 + ], + "blocks": [ + { + "bbox": [ + 397, + 198, + 518, + 289 + ], + "lines": [ + { + "bbox": [ + 397, + 198, + 518, + 289 + ], + "spans": [ + { + "bbox": [ + 397, + 198, + 518, + 289 + ], + "type": "image", + "image_path": "763c96045715d809c9e422f9aa615105eac65d7140529630622d8010d75de173.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 102, + 292, + 156, + 379 + ], + "blocks": [ + { + "bbox": [ + 102, + 292, + 156, + 379 + ], + "lines": [ + { + "bbox": [ + 102, + 292, + 156, + 379 + ], + "spans": [ + { + "bbox": [ + 102, + 292, + 156, + 379 + ], + "type": "image", + "image_path": "3dbfbe5de303b95e93930efc7c11ef12558704eeaae8642a301b7dbc59420786.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 6 + }, + { + "type": "image", + "bbox": [ + 161, + 293, + 279, + 379 + ], + "blocks": [ + { + "bbox": [ + 161, + 293, + 279, + 379 + ], + "lines": [ + { + "bbox": [ + 161, + 293, + 279, + 379 + ], + "spans": [ + { + "bbox": [ + 161, + 293, + 279, + 379 + ], + "type": "image", + "image_path": "92052199d2635356816ee2aba2aed1f8dca8d83b493fd47666fe5985bfd49a89.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 280, + 293, + 394, + 381 + ], + "blocks": [ + { + "bbox": [ + 280, + 293, + 394, + 381 + ], + "lines": [ + { + "bbox": [ + 280, + 293, + 394, + 381 + ], + "spans": [ + { + "bbox": [ + 280, + 293, + 394, + 381 + ], + "type": "image", + "image_path": "d2dc794b351faf6fef98c864ed3e399cf079c26ef90eadbb34e0c6fc3e4be78c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 398, + 293, + 517, + 381 + ], + "blocks": [ + { + "bbox": [ + 398, + 293, + 517, + 381 + ], + "lines": [ + { + "bbox": [ + 398, + 293, + 517, + 381 + ], + "spans": [ + { + "bbox": [ + 398, + 293, + 517, + 381 + ], + "type": "image", + "image_path": "4ae43763ef324638250f7125268cf0a0cd1fadff5496f7eae1707c4aaad3ee1c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 102, + 382, + 156, + 462 + ], + "blocks": [ + { + "bbox": [ + 102, + 382, + 156, + 462 + ], + "lines": [ + { + "bbox": [ + 102, + 382, + 156, + 462 + ], + "spans": [ + { + "bbox": [ + 102, + 382, + 156, + 462 + ], + "type": "image", + "image_path": "97cb4999fb7546f2acbbc518825a138f6e835d048cf927abe4812e6a75004c0d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 602, + 532, + 647 + ], + "lines": [ + { + "bbox": [ + 77, + 602, + 532, + 647 + ], + "spans": [ + { + "bbox": [ + 77, + 602, + 532, + 647 + ], + "type": "text", + "content": "Figure 46: Task: Novel view synthesis. Evaluate the 3D perception and spatial understanding ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Midjourney v6.1 [75]. Observation: GPT-4o can generate better style and structure-consistent novel views for both artistic painting and asymmetric objects." 
+ } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 161, + 382, + 279, + 462 + ], + "blocks": [ + { + "bbox": [ + 161, + 382, + 279, + 462 + ], + "lines": [ + { + "bbox": [ + 161, + 382, + 279, + 462 + ], + "spans": [ + { + "bbox": [ + 161, + 382, + 279, + 462 + ], + "type": "image", + "image_path": "d895489817a6f21c887841484b153778383ad7ed2f565fe9eb3bf5a334aa848c.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 280, + 383, + 394, + 463 + ], + "blocks": [ + { + "bbox": [ + 280, + 383, + 394, + 463 + ], + "lines": [ + { + "bbox": [ + 280, + 383, + 394, + 463 + ], + "spans": [ + { + "bbox": [ + 280, + 383, + 394, + 463 + ], + "type": "image", + "image_path": "7f93176c11f8bb6fd47383525af946efe1f62c62e955b8c39ee12e4183f7af70.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 397, + 383, + 517, + 464 + ], + "blocks": [ + { + "bbox": [ + 397, + 383, + 517, + 464 + ], + "lines": [ + { + "bbox": [ + 397, + 383, + 517, + 464 + ], + "spans": [ + { + "bbox": [ + 397, + 383, + 517, + 464 + ], + "type": "image", + "image_path": "2ac37d8f19a0434436d70e6adb1d281a20d590ab3f0d0d7a7fa32893d0767278.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 102, + 466, + 156, + 545 + ], + "blocks": [ + { + "bbox": [ + 102, + 466, + 156, + 545 + ], + "lines": [ + { + "bbox": [ + 102, + 466, + 156, + 545 + ], + "spans": [ + { + "bbox": [ + 102, + 466, + 156, + 545 + ], + "type": "image", + "image_path": "1c5838ea94c22768e1999617b5ceda45f55d61e50d2423844f55c0de5fe7e177.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 161, + 465, + 279, + 545 + ], + "blocks": [ + { + "bbox": [ + 161, + 465, + 279, + 545 + ], + "lines": [ + { + "bbox": [ + 161, + 465, + 279, + 545 + ], + "spans": [ + { + "bbox": [ + 161, + 465, + 279, + 545 + ], + "type": "image", + "image_path": "06dfd66ade1356c7cd3d3f8e966e4e534aad24c5c887a40d0ac06dfd803d92ab.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 280, + 464, + 394, + 547 + ], + "blocks": [ + { + "bbox": [ + 280, + 464, + 394, + 547 + ], + "lines": [ + { + "bbox": [ + 280, + 464, + 394, + 547 + ], + "spans": [ + { + "bbox": [ + 280, + 464, + 394, + 547 + ], + "type": "image", + "image_path": "89c711b51bd739f9ec7fcc44d62889f885c5ec915e0fc0cda6d670f5ca049e58.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 397, + 466, + 517, + 546 + ], + "blocks": [ + { + "bbox": [ + 397, + 466, + 517, + 546 + ], + "lines": [ + { + "bbox": [ + 397, + 466, + 517, + 546 + ], + "spans": [ + { + "bbox": [ + 397, + 466, + 517, + 546 + ], + "type": "image", + "image_path": "5bd9acccaac95cfe7fe77ac7db21eb318d54092425bd5f07277c03e7499c903f.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 189, + 552, + 413, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 552, + 413, + 565 + ], + "spans": [ + { + "bbox": [ + 189, + 552, + 413, + 565 + ], + "type": "text", + "content": "Input Text: \"Generate three views of this 
picture.\"" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 108, + 569, + 164, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 569, + 164, + 582 + ], + "spans": [ + { + "bbox": [ + 108, + 569, + 164, + 582 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 214, + 571, + 247, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 571, + 247, + 581 + ], + "spans": [ + { + "bbox": [ + 214, + 571, + 247, + 581 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 570, + 379, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 570, + 379, + 581 + ], + "spans": [ + { + "bbox": [ + 308, + 570, + 379, + 581 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 419, + 569, + 488, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 569, + 488, + 582 + ], + "spans": [ + { + "bbox": [ + 419, + 569, + 488, + 582 + ], + "type": "text", + "content": "Midjourney v6.1" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "61" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 60 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 179, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 179, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 179, + 83 + ], + "type": "text", + "content": "2.4 Image-to-X Tasks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 92, + 533, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 92, + 533, + 126 + ], + "spans": [ + { + "bbox": [ + 77, + 92, + 533, + 126 + ], + "type": "text", + "content": "In this section, we further evaluate both GPT-4o and Gemini 2.0 Flash for several dense image understanding tasks, including segmentation-related tasks, depth estimation, normal estimation, matting, salient object detection, edge detection, layout detection, text detection, and object tracking." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 78, + 137, + 198, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 137, + 198, + 148 + ], + "spans": [ + { + "bbox": [ + 78, + 137, + 198, + 148 + ], + "type": "text", + "content": "2.4.1 Image Segmentation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 156, + 533, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 156, + 533, + 233 + ], + "spans": [ + { + "bbox": [ + 77, + 156, + 533, + 233 + ], + "type": "text", + "content": "Image segmentation tasks group pixels of the given image or video into semantic regions. It is a fundamental problem in computer vision and involves numerous real-world applications, such as robotics, automated surveillance, and image/video editing. With the development of recent deep learning methods, this domain has achieved rapid progress. Early works mainly adopt CNN-based methods with large kernels or respective fields. Recently, transformer-based methods have also worked well and surpassed previous CNN-based methods on various benchmarks. 
In particular, we test three segmentation tasks, including referring segmentation, semantic segmentation, and panoptic segmentation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 238, + 532, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 238, + 532, + 360 + ], + "spans": [ + { + "bbox": [ + 77, + 238, + 532, + 360 + ], + "type": "text", + "content": "Referring Segmentation. This task outputs the mask corresponding to the input text, and the goal is to test the pixel-level grounding ability of the model. In Figure 47, we compare GPT-4o, Gemini 2.0 Flash, and a recent state-of-the-art method, Sa2VA [117] (8B model " + }, + { + "bbox": [ + 77, + 238, + 532, + 360 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 77, + 238, + 532, + 360 + ], + "type": "text", + "content": "). We show five open-world test cases. For the first two cases, GPT-4o shows coarse localization ability on background regions. For example, it can mark the grass region despite the imprecise boundaries. However, compared to the SOTA method, Sa2VA, GPT-4o mistakenly merges the two large regions. In the third row, neither GPT-4o nor Gemini 2.0 Flash can perform grounding with complex text inputs. In the fourth row, all models perform badly. GPT-4o generates a chair that does not exist in the image, while Gemini 2.0 Flash instead performs image editing, replacing the smallest chair with a normal chair. Sa2VA also segments the wrong object (the nearest chair). In the last example, GPT-4o also fails to segment smaller objects (\"bag\"). In all examples, both GPT-4o and Gemini 2.0 Flash modify the image contents. These examples indicate that GPT-4o has weak pixel-level grounding ability." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 363, + 533, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 363, + 533, + 462 + ], + "spans": [ + { + "bbox": [ + 77, + 363, + 533, + 462 + ], + "type": "text", + "content": "Semantic Segmentation. Semantic segmentation assigns each pixel a semantic label and is a basic vision task. In Figure 48, we show several test cases on the semantic segmentation task. In particular, we adopt Deeplab-V3+ [14] (ResNet101 as backbone, trained on Pascal-Context) as an expert model for reference. Surprisingly, the mask quality of GPT-4o is good on four examples, even comparable with the expert model, Deeplab-V3+. During testing, we find that text labels may be randomly appended to the masks, which is why the first row differs from the remaining examples. For the second and third examples, GPT-4o misaligns the text and mask regions. Compared to Gemini 2.0 Flash, GPT-4o has a much stronger ability in semantic segmentation, particularly for mask shape. However, there is still much room for improvement on this task, including a unified semantic segmentation format, better text-mask alignment, and more accurate mask labels." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 77, + 467, + 533, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 467, + 533, + 588 + ], + "spans": [ + { + "bbox": [ + 77, + 467, + 533, + 588 + ], + "type": "text", + "content": "Panoptic Segmentation. This task assigns a semantic label to each foreground region and one mask label and one instance ID to each instance; it unifies semantic segmentation and instance segmentation. 
In Figure 49, we compare the panoptic segmentation ability of GPT-4o, Gemini 2.0 Flash, and one expert model, K-Net [123] (trained on the COCO panoptic segmentation dataset, with ResNet50 as backbone). Overall, the mask shapes of GPT-4o are good. The model can understand the panoptic segmentation task, while Gemini 2.0 Flash cannot handle it in the first and third cases. However, the spatial locations are changed in all cases. The generated masks are in part-whole formats and are even finer-grained than K-Net's. For example, in the first case, the person's jersey number (17) and the people's hair are also marked. Meanwhile, we again observe a similar issue: some examples contain text labels while others do not, even though they use the same text prompt. In addition, GPT-4o can distinguish different instances with different colors, although most of the results are not good (see the last example)." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 93, + 710, + 261, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 710, + 261, + 722 + ], + "spans": [ + { + "bbox": [ + 93, + 710, + 261, + 722 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 93, + 710, + 261, + 722 + ], + "type": "text", + "content": " https://huggingface.co/ByteDance/Sa2VA-8B" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "62" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 61 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 80, + 186, + 92 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 80, + 186, + 92 + ], + "spans": [ + { + "bbox": [ + 130, + 80, + 186, + 92 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 103, + 481, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 103, + 481, + 117 + ], + "spans": [ + { + "bbox": [ + 141, + 103, + 481, + 117 + ], + "type": "text", + "content": "Evaluation: Referring Expression Segmentation, Grounding and Grouping." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 111, + 120, + 200, + 207 + ], + "blocks": [ + { + "bbox": [ + 111, + 120, + 200, + 207 + ], + "lines": [ + { + "bbox": [ + 111, + 120, + 200, + 207 + ], + "spans": [ + { + "bbox": [ + 111, + 120, + 200, + 207 + ], + "type": "image", + "image_path": "dbd74c382963ca8096f3df2cbf8c15b1dc9bb0dcf7b14f2691ca5f3969de4ae6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 210, + 120, + 299, + 206 + ], + "blocks": [ + { + "bbox": [ + 210, + 120, + 299, + 206 + ], + "lines": [ + { + "bbox": [ + 210, + 120, + 299, + 206 + ], + "spans": [ + { + "bbox": [ + 210, + 120, + 299, + 206 + ], + "type": "image", + "image_path": "0602a903854b76208c57b28e4349dae82df8b2aa2e65f275df6b6940b8ec8061.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 313, + 120, + 399, + 206 + ], + "blocks": [ + { + "bbox": [ + 313, + 120, + 399, + 206 + ], + "lines": [ + { + "bbox": [ + 313, + 120, + 399, + 206 + ], + "spans": [ + { + "bbox": [ + 313, + 120, + 399, + 206 + ], + "type": "image", + "image_path": "03c4e2f2110cca390cec16e188035acf44295367b91027a771e4e53dedaf79e1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 411, + 120, + 500, + 206 + ], + "blocks": [ + { + "bbox": [ + 411, + 120, + 500, + 206 + ], + "lines": [ + { + "bbox": [ + 411, + 120, + 500, + 206 + ], + "spans": [ + { + "bbox": [ + 411, + 120, + 500, + 206 + ], + "type": "image", + "image_path": "452eb4a0a98c3bffd9b5ca2149205d5cd34827599c8caa097cbd6d949a07b14c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 212, + 491, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 212, + 491, + 223 + ], + "spans": [ + { + "bbox": [ + 111, + 212, + 491, + 223 + ], + "type": "text", + "content": "Input Text: \"Please segment the grass in the image and directly generate the output image.\"" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 111, + 226, + 201, + 316 + ], + "blocks": [ + { + "bbox": [ + 111, + 226, + 201, + 316 + ], + "lines": [ + { + "bbox": [ + 111, + 226, + 201, + 316 + ], + "spans": [ + { + "bbox": [ + 111, + 226, + 201, + 316 + ], + "type": "image", + "image_path": "8f284b6c611448bfc951157e9a8d4ed20c6128da83b6852e318506b931aab1de.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 211, + 226, + 300, + 316 + ], + "blocks": [ + { + "bbox": [ + 211, + 226, + 300, + 316 + ], + "lines": [ + { + "bbox": [ + 211, + 226, + 300, + 316 + ], + "spans": [ + { + "bbox": [ + 211, + 226, + 300, + 316 + ], + "type": "image", + "image_path": "f99ed011b160f34447b7db945e73c93bb19c1fb9a7888322a3f61e89fa0b9b59.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 312, + 226, + 402, + 316 + ], + "blocks": [ + { + "bbox": [ + 312, + 226, + 402, + 316 + ], + "lines": [ + { + "bbox": [ + 312, + 226, + 402, + 316 + ], + "spans": [ + { + "bbox": [ + 312, + 226, + 402, + 316 + ], + "type": "image", + "image_path": "c5ff8325e12ed6030f6fd0a88c40d2600c49d7423ec72c29d20775bcecc460a1.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 9 + }, + { + "type": "image", + "bbox": [ + 411, + 227, + 500, + 316 + ], + "blocks": [ + { + "bbox": [ + 411, + 227, + 500, + 316 + ], + "lines": [ + { + "bbox": [ + 411, + 227, + 500, + 316 + ], + "spans": [ + { + "bbox": [ + 411, + 227, + 500, + 316 + ], + "type": "image", + "image_path": "e331257e3a002ea35aae9b3560e329ae51aea6ec05a4a8f8fc4c533cfe981c94.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 319, + 485, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 319, + 485, + 331 + ], + "spans": [ + { + "bbox": [ + 111, + 319, + 485, + 331 + ], + "type": "text", + "content": "Input Text: \"Please segment the sand in the image and directly generate the output image.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 111, + 335, + 200, + 425 + ], + "blocks": [ + { + "bbox": [ + 111, + 335, + 200, + 425 + ], + "lines": [ + { + "bbox": [ + 111, + 335, + 200, + 425 + ], + "spans": [ + { + "bbox": [ + 111, + 335, + 200, + 425 + ], + "type": "image", + "image_path": "f2338a3386d7dafb1d4ed2f0d097545a66aaae303a0ec9c364235ccd56f6fb11.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 211, + 335, + 300, + 425 + ], + "blocks": [ + { + "bbox": [ + 211, + 335, + 300, + 425 + ], + "lines": [ + { + "bbox": [ + 211, + 335, + 300, + 425 + ], + "spans": [ + { + "bbox": [ + 211, + 335, + 300, + 425 + ], + "type": "image", + "image_path": "94ec2cb93bf6619e7cff5d27b701fd16348dc2330513de806c10891f384b214e.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 313, + 335, + 402, + 425 + ], + "blocks": [ + { + "bbox": [ + 313, + 335, + 402, + 425 + ], + "lines": [ + { + "bbox": [ + 313, + 335, + 402, + 425 + ], + "spans": [ + { + "bbox": [ + 313, + 335, + 402, + 425 + ], + "type": "image", + "image_path": "bc7f355d6226e9ae8b3bf01b1ec603dc9a91ef5a37b5df843f5fa2d53f805b40.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 411, + 335, + 500, + 425 + ], + "blocks": [ + { + "bbox": [ + 411, + 335, + 500, + 425 + ], + "lines": [ + { + "bbox": [ + 411, + 335, + 500, + 425 + ], + "spans": [ + { + "bbox": [ + 411, + 335, + 500, + 425 + ], + "type": "image", + "image_path": "54845239cccd77ffed346bbbb01a2390dc01dd6649c038184a108f0d78f6e18b.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 427, + 503, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 427, + 503, + 449 + ], + "spans": [ + { + "bbox": [ + 111, + 427, + 503, + 449 + ], + "type": "text", + "content": "Input Text: \"Please segment the table beside the black sofa in the image and directly generate the output image.\"" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 111, + 452, + 200, + 541 + ], + "blocks": [ + { + "bbox": [ + 111, + 452, + 200, + 541 + ], + "lines": [ + { + "bbox": [ + 111, + 452, + 200, + 541 + ], + "spans": [ + { + "bbox": [ + 111, + 452, + 200, + 541 + ], + "type": "image", + "image_path": "40eaf0cc2887a61189056995988a25e411abbb492721dd306939eb435cb2c205.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 211, + 452, + 297, + 541 + 
], + "blocks": [ + { + "bbox": [ + 211, + 452, + 297, + 541 + ], + "lines": [ + { + "bbox": [ + 211, + 452, + 297, + 541 + ], + "spans": [ + { + "bbox": [ + 211, + 452, + 297, + 541 + ], + "type": "image", + "image_path": "fd5210b3394235c1477fe979838a108f3de32982a0527224a4d83dc207ae62bc.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 312, + 452, + 400, + 541 + ], + "blocks": [ + { + "bbox": [ + 312, + 452, + 400, + 541 + ], + "lines": [ + { + "bbox": [ + 312, + 452, + 400, + 541 + ], + "spans": [ + { + "bbox": [ + 312, + 452, + 400, + 541 + ], + "type": "image", + "image_path": "900798659953d6bf5da0b4a07b8d732ff649b11f079acb0d0b7f9e3317082e6e.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 411, + 452, + 500, + 541 + ], + "blocks": [ + { + "bbox": [ + 411, + 452, + 500, + 541 + ], + "lines": [ + { + "bbox": [ + 411, + 452, + 500, + 541 + ], + "spans": [ + { + "bbox": [ + 411, + 452, + 500, + 541 + ], + "type": "image", + "image_path": "44a28c987decc43c2c7016a554b05881a1fabeb9cb87f073992406fc3bf4eeb3.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 544, + 476, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 544, + 476, + 555 + ], + "spans": [ + { + "bbox": [ + 115, + 544, + 476, + 555 + ], + "type": "text", + "content": "Input Text: \"Please segment the smallest chair and directly generate the output image.\"" + } + ] + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 111, + 559, + 200, + 647 + ], + "blocks": [ + { + "bbox": [ + 111, + 559, + 200, + 647 + ], + "lines": [ + { + "bbox": [ + 111, + 559, + 200, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 559, + 200, + 647 + ], + "type": "image", + "image_path": "7be4bdc21bc3e0a0da4304091bcaaf819d4c752aa483ba4d25c1add751eaadcc.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 694, + 533, + 728 + ], + "lines": [ + { + "bbox": [ + 77, + 694, + 533, + 728 + ], + "spans": [ + { + "bbox": [ + 77, + 694, + 533, + 728 + ], + "type": "text", + "content": "Figure 47: Task: Image to X: Referring expression segmentation. Evaluate the grounding and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Sa2VA [117]. Observation: These examples indicate that current GPT-4o has weak pixel-level grounding ability." 
+ } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 211, + 559, + 297, + 647 + ], + "blocks": [ + { + "bbox": [ + 211, + 559, + 297, + 647 + ], + "lines": [ + { + "bbox": [ + 211, + 559, + 297, + 647 + ], + "spans": [ + { + "bbox": [ + 211, + 559, + 297, + 647 + ], + "type": "image", + "image_path": "ddccd12b79d35e83a67c0ec67ebca9d6ffdf81f90dbdc128897666c54e5ca249.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 313, + 559, + 402, + 647 + ], + "blocks": [ + { + "bbox": [ + 313, + 559, + 402, + 647 + ], + "lines": [ + { + "bbox": [ + 313, + 559, + 402, + 647 + ], + "spans": [ + { + "bbox": [ + 313, + 559, + 402, + 647 + ], + "type": "image", + "image_path": "82c794a5d723f0b36ec6540fc27ae81b30d682d013d9335993d95bcec71b79ae.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 411, + 559, + 500, + 647 + ], + "blocks": [ + { + "bbox": [ + 411, + 559, + 500, + 647 + ], + "lines": [ + { + "bbox": [ + 411, + 559, + 500, + 647 + ], + "spans": [ + { + "bbox": [ + 411, + 559, + 500, + 647 + ], + "type": "image", + "image_path": "cf3e7ef91003317002f53f486a244b5640ca63c14670c2003aa978121ccb908c.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "bbox": [ + 116, + 649, + 485, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 649, + 485, + 660 + ], + "spans": [ + { + "bbox": [ + 116, + 649, + 485, + 660 + ], + "type": "text", + "content": "Input Text: \"Please segment the bag in the image and directly generate the output image.\"" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 129, + 667, + 181, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 667, + 181, + 677 + ], + "spans": [ + { + "bbox": [ + 129, + 667, + 181, + 677 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 241, + 667, + 271, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 667, + 271, + 677 + ], + "spans": [ + { + "bbox": [ + 241, + 667, + 271, + 677 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 322, + 667, + 390, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 667, + 390, + 677 + ], + "spans": [ + { + "bbox": [ + 322, + 667, + 390, + 677 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 441, + 667, + 471, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 441, + 667, + 471, + 677 + ], + "spans": [ + { + "bbox": [ + 441, + 667, + 471, + 677 + ], + "type": "text", + "content": "Sa2VA" + } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "63" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 62 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 157, + 140, + 170, + 154 + ], + "blocks": [ + { + "bbox": [ + 157, + 140, + 170, + 154 + ], + "lines": [ + { + "bbox": [ + 157, + 140, + 170, + 154 + ], + "spans": [ + { + 
"bbox": [ + 157, + 140, + 170, + 154 + ], + "type": "image", + "image_path": "a71db3675a1fc3707882b1ea7573faaee81a29d969a48f7917d231e5c96013a7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 171, + 144, + 452, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 144, + 452, + 156 + ], + "spans": [ + { + "bbox": [ + 171, + 144, + 452, + 156 + ], + "type": "text", + "content": "Evaluation: Semantic Segmentation, Shape and Grouping." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 99, + 158, + 192, + 251 + ], + "blocks": [ + { + "bbox": [ + 99, + 158, + 192, + 251 + ], + "lines": [ + { + "bbox": [ + 99, + 158, + 192, + 251 + ], + "spans": [ + { + "bbox": [ + 99, + 158, + 192, + 251 + ], + "type": "image", + "image_path": "dfa857e58ea3978f96a70b80835f05da2c3c164ea663a535310bee9d91124824.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 203, + 157, + 299, + 250 + ], + "blocks": [ + { + "bbox": [ + 203, + 157, + 299, + 250 + ], + "lines": [ + { + "bbox": [ + 203, + 157, + 299, + 250 + ], + "spans": [ + { + "bbox": [ + 203, + 157, + 299, + 250 + ], + "type": "image", + "image_path": "bffe05ee07a77a1df7cf6cc2b2d3a0cb232aa99efd565f3002c170b0ea6c49e3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 308, + 157, + 400, + 250 + ], + "blocks": [ + { + "bbox": [ + 308, + 157, + 400, + 250 + ], + "lines": [ + { + "bbox": [ + 308, + 157, + 400, + 250 + ], + "spans": [ + { + "bbox": [ + 308, + 157, + 400, + 250 + ], + "type": "image", + "image_path": "1fc3e7193935e21d941ba2a69b2a1621e26533675595f4e66648793cc5c521ca.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 413, + 157, + 507, + 250 + ], + "blocks": [ + { + "bbox": [ + 413, + 157, + 507, + 250 + ], + "lines": [ + { + "bbox": [ + 413, + 157, + 507, + 250 + ], + "spans": [ + { + "bbox": [ + 413, + 157, + 507, + 250 + ], + "type": "image", + "image_path": "89990c180c4a599a243294b0904684d0351ed3529a4375d02a3ffb1ea5cc5012.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 100, + 253, + 438, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 253, + 438, + 266 + ], + "spans": [ + { + "bbox": [ + 100, + 253, + 438, + 266 + ], + "type": "text", + "content": "Input Text: \"Please generate the semantic segmentation result of the image.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 100, + 269, + 192, + 360 + ], + "blocks": [ + { + "bbox": [ + 100, + 269, + 192, + 360 + ], + "lines": [ + { + "bbox": [ + 100, + 269, + 192, + 360 + ], + "spans": [ + { + "bbox": [ + 100, + 269, + 192, + 360 + ], + "type": "image", + "image_path": "fbc284435daba3dd588193455507d1744f38c42e568b7de9d51aacf24568bbd6.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 203, + 269, + 299, + 360 + ], + "blocks": [ + { + "bbox": [ + 203, + 269, + 299, + 360 + ], + "lines": [ + { + "bbox": [ + 203, + 269, + 299, + 360 + ], + "spans": [ + { + "bbox": [ + 203, + 269, + 299, + 360 + ], + "type": "image", + "image_path": "6a5d14299085340af2af8df55c7d78da1c76cf94e7c40072a4aa894710067730.jpg" + } + ] + } + ], + 
"index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 307, + 269, + 402, + 360 + ], + "blocks": [ + { + "bbox": [ + 307, + 269, + 402, + 360 + ], + "lines": [ + { + "bbox": [ + 307, + 269, + 402, + 360 + ], + "spans": [ + { + "bbox": [ + 307, + 269, + 402, + 360 + ], + "type": "image", + "image_path": "70b3977d31710e5b25126e9195e5448eee93a4c761b993c52e1db1cb26c59702.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 414, + 269, + 507, + 361 + ], + "blocks": [ + { + "bbox": [ + 414, + 269, + 507, + 361 + ], + "lines": [ + { + "bbox": [ + 414, + 269, + 507, + 361 + ], + "spans": [ + { + "bbox": [ + 414, + 269, + 507, + 361 + ], + "type": "image", + "image_path": "835c092bc8f3a790fdd3ce19ea243873bcd1e8c977f1e3b28cb6589fd8a4f735.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 100, + 365, + 436, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 365, + 436, + 378 + ], + "spans": [ + { + "bbox": [ + 100, + 365, + 436, + 378 + ], + "type": "text", + "content": "Input Text: \"Please generate the semantic segmentation result of the image.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 100, + 384, + 192, + 475 + ], + "blocks": [ + { + "bbox": [ + 100, + 384, + 192, + 475 + ], + "lines": [ + { + "bbox": [ + 100, + 384, + 192, + 475 + ], + "spans": [ + { + "bbox": [ + 100, + 384, + 192, + 475 + ], + "type": "image", + "image_path": "0759fb223e415e0f19710f612a3c80b614bae80774a18396353bf7b14eb7da5d.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 203, + 384, + 299, + 475 + ], + "blocks": [ + { + "bbox": [ + 203, + 384, + 299, + 475 + ], + "lines": [ + { + "bbox": [ + 203, + 384, + 299, + 475 + ], + "spans": [ + { + "bbox": [ + 203, + 384, + 299, + 475 + ], + "type": "image", + "image_path": "9915d6c0aa3e038b5ec03ebeb8dcca7a1b7496741251e2af39361e947240d627.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 307, + 384, + 402, + 475 + ], + "blocks": [ + { + "bbox": [ + 307, + 384, + 402, + 475 + ], + "lines": [ + { + "bbox": [ + 307, + 384, + 402, + 475 + ], + "spans": [ + { + "bbox": [ + 307, + 384, + 402, + 475 + ], + "type": "image", + "image_path": "07315caab9c8a97608642744aa18c2e35c6920d402732aa3a2675180b0a7af1e.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 413, + 384, + 507, + 475 + ], + "blocks": [ + { + "bbox": [ + 413, + 384, + 507, + 475 + ], + "lines": [ + { + "bbox": [ + 413, + 384, + 507, + 475 + ], + "spans": [ + { + "bbox": [ + 413, + 384, + 507, + 475 + ], + "type": "image", + "image_path": "4bdca3b7ab2d59c6d6c1458affcfac976bc7751cd6959565ea93ddf768f48481.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 100, + 479, + 436, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 479, + 436, + 491 + ], + "spans": [ + { + "bbox": [ + 100, + 479, + 436, + 491 + ], + "type": "text", + "content": "Input Text: \"Please generate the semantic segmentation result of the image.\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 100, + 498, + 
192, + 589 + ], + "blocks": [ + { + "bbox": [ + 100, + 498, + 192, + 589 + ], + "lines": [ + { + "bbox": [ + 100, + 498, + 192, + 589 + ], + "spans": [ + { + "bbox": [ + 100, + 498, + 192, + 589 + ], + "type": "image", + "image_path": "1b277bbf258a90079c84ec982452f71ac0f350730f9aea170064b358fe0eb923.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 638, + 533, + 683 + ], + "lines": [ + { + "bbox": [ + 77, + 638, + 533, + 683 + ], + "spans": [ + { + "bbox": [ + 77, + 638, + 533, + 683 + ], + "type": "text", + "content": "Figure 48: Task: Image to X: Semantic segmentation. Evaluate the shape and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Deeplab-V3+ [14]. Observation: Compared with Gemini 2.0 Flash, the mask quality of GPT-4o is good. However, there are still huge gaps in the standard semantic segmentation format." + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 203, + 498, + 299, + 589 + ], + "blocks": [ + { + "bbox": [ + 203, + 498, + 299, + 589 + ], + "lines": [ + { + "bbox": [ + 203, + 498, + 299, + 589 + ], + "spans": [ + { + "bbox": [ + 203, + 498, + 299, + 589 + ], + "type": "image", + "image_path": "a6a106419b52434d8a55c2b5fb821d02c748ecc0597904cf146832238e26aadd.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 307, + 498, + 402, + 589 + ], + "blocks": [ + { + "bbox": [ + 307, + 498, + 402, + 589 + ], + "lines": [ + { + "bbox": [ + 307, + 498, + 402, + 589 + ], + "spans": [ + { + "bbox": [ + 307, + 498, + 402, + 589 + ], + "type": "image", + "image_path": "fd3f62eae10787f13ed9ce858671690809ab040be8b604ced0e3044aef5bc8d9.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 413, + 498, + 507, + 589 + ], + "blocks": [ + { + "bbox": [ + 413, + 498, + 507, + 589 + ], + "lines": [ + { + "bbox": [ + 413, + 498, + 507, + 589 + ], + "spans": [ + { + "bbox": [ + 413, + 498, + 507, + 589 + ], + "type": "image", + "image_path": "aa66b3606a91f980129cef7b09a646b9b32aae5937a1a800da9a62b68a314796.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 590, + 441, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 590, + 441, + 603 + ], + "spans": [ + { + "bbox": [ + 105, + 590, + 441, + 603 + ], + "type": "text", + "content": "Input Text: \"Please generate the semantic segmentation result of the image.\"" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 606, + 170, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 606, + 170, + 619 + ], + "spans": [ + { + "bbox": [ + 114, + 606, + 170, + 619 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 231, + 606, + 264, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 606, + 264, + 616 + ], + "spans": [ + { + "bbox": [ + 231, + 606, + 264, + 616 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 318, + 606, + 389, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 606, + 389, + 616 + ], + "spans": [ + { + "bbox": [ + 318, + 606, + 389, + 616 + ], + "type": "text", + 
"content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 430, + 606, + 485, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 430, + 606, + 485, + 618 + ], + "spans": [ + { + "bbox": [ + 430, + 606, + 485, + 618 + ], + "type": "text", + "content": "Deeplab-V3+" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 124, + 118, + 183, + 131 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 118, + 183, + 131 + ], + "spans": [ + { + "bbox": [ + 124, + 118, + 183, + 131 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "64" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 63 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 166, + 99, + 178, + 114 + ], + "blocks": [ + { + "bbox": [ + 166, + 99, + 178, + 114 + ], + "lines": [ + { + "bbox": [ + 166, + 99, + 178, + 114 + ], + "spans": [ + { + "bbox": [ + 166, + 99, + 178, + 114 + ], + "type": "image", + "image_path": "9fe5199286f0112238a7073ea1794d37f5662a85af0714537b15349dcc732e6c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 179, + 102, + 444, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 102, + 444, + 115 + ], + "spans": [ + { + "bbox": [ + 179, + 102, + 444, + 115 + ], + "type": "text", + "content": "Evaluation: Panoptic Segmentation, Grouping and Shape." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 110, + 120, + 200, + 209 + ], + "blocks": [ + { + "bbox": [ + 110, + 120, + 200, + 209 + ], + "lines": [ + { + "bbox": [ + 110, + 120, + 200, + 209 + ], + "spans": [ + { + "bbox": [ + 110, + 120, + 200, + 209 + ], + "type": "image", + "image_path": "dfdea49947535ad053094764e4f1c4f57b87c66ef48041b87f0c1403e8ebfb06.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 210, + 119, + 300, + 209 + ], + "blocks": [ + { + "bbox": [ + 210, + 119, + 300, + 209 + ], + "lines": [ + { + "bbox": [ + 210, + 119, + 300, + 209 + ], + "spans": [ + { + "bbox": [ + 210, + 119, + 300, + 209 + ], + "type": "image", + "image_path": "0d5ac0ba593b14c0ff43464e9d8cb2a78e3c44bedcbbb29c36a51144ad9c8c34.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 313, + 120, + 400, + 209 + ], + "blocks": [ + { + "bbox": [ + 313, + 120, + 400, + 209 + ], + "lines": [ + { + "bbox": [ + 313, + 120, + 400, + 209 + ], + "spans": [ + { + "bbox": [ + 313, + 120, + 400, + 209 + ], + "type": "image", + "image_path": "ae911cf1c44c913e1579fdc6ee25f6acd40fabf3b6bb39cb4fd1df2fd7c49995.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 411, + 120, + 500, + 209 + ], + "blocks": [ + { + "bbox": [ + 411, + 120, + 500, + 209 + ], + "lines": [ + { + "bbox": [ + 411, + 120, + 500, + 209 + ], + "spans": [ + { + "bbox": [ + 411, + 120, + 500, + 209 + ], + "type": "image", + "image_path": "1b3602b90061d565f84e84ed00f2de84147c20f674c0bc022d3fa5b9bd5926a9.jpg" + } + ] + } + ], + "index": 6, + "angle": 
0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 212, + 427, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 212, + 427, + 223 + ], + "spans": [ + { + "bbox": [ + 110, + 212, + 427, + 223 + ], + "type": "text", + "content": "Input Text: \"Please generate the panoptic segmentation result of the image.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 110, + 226, + 200, + 316 + ], + "blocks": [ + { + "bbox": [ + 110, + 226, + 200, + 316 + ], + "lines": [ + { + "bbox": [ + 110, + 226, + 200, + 316 + ], + "spans": [ + { + "bbox": [ + 110, + 226, + 200, + 316 + ], + "type": "image", + "image_path": "9164a12913f12d50e14960da007a5f198f424495fc1f7b5da62b254cd8bd48e1.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 211, + 226, + 300, + 316 + ], + "blocks": [ + { + "bbox": [ + 211, + 226, + 300, + 316 + ], + "lines": [ + { + "bbox": [ + 211, + 226, + 300, + 316 + ], + "spans": [ + { + "bbox": [ + 211, + 226, + 300, + 316 + ], + "type": "image", + "image_path": "3745e5cc44c159dbc30c05e6abfee39434906086aa082386ab669e6a3800719e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 312, + 226, + 400, + 316 + ], + "blocks": [ + { + "bbox": [ + 312, + 226, + 400, + 316 + ], + "lines": [ + { + "bbox": [ + 312, + 226, + 400, + 316 + ], + "spans": [ + { + "bbox": [ + 312, + 226, + 400, + 316 + ], + "type": "image", + "image_path": "600cb4b1adf7ff450977eb1ff9c1356ca0f2202a614d5297fdfa99e34eb010fc.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 411, + 226, + 500, + 316 + ], + "blocks": [ + { + "bbox": [ + 411, + 226, + 500, + 316 + ], + "lines": [ + { + "bbox": [ + 411, + 226, + 500, + 316 + ], + "spans": [ + { + "bbox": [ + 411, + 226, + 500, + 316 + ], + "type": "image", + "image_path": "1e96cf20eb72b581b66ee41d6ac4ce09a54c1964332e8249d136a88fe87afae1.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 110, + 318, + 427, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 318, + 427, + 330 + ], + "spans": [ + { + "bbox": [ + 110, + 318, + 427, + 330 + ], + "type": "text", + "content": "Input Text: \"Please generate the panoptic segmentation result of the image.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 110, + 335, + 200, + 425 + ], + "blocks": [ + { + "bbox": [ + 110, + 335, + 200, + 425 + ], + "lines": [ + { + "bbox": [ + 110, + 335, + 200, + 425 + ], + "spans": [ + { + "bbox": [ + 110, + 335, + 200, + 425 + ], + "type": "image", + "image_path": "bba2b69130e87b6f2573ea275d85a084e8a7be0d184cc6aba44beed927746b1f.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 211, + 335, + 299, + 424 + ], + "blocks": [ + { + "bbox": [ + 211, + 335, + 299, + 424 + ], + "lines": [ + { + "bbox": [ + 211, + 335, + 299, + 424 + ], + "spans": [ + { + "bbox": [ + 211, + 335, + 299, + 424 + ], + "type": "image", + "image_path": "98a34d23216e8ad84b654750599c6cf856ae93a257ecd204feb4347ff25f2411.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 313, + 335, + 402, + 425 + ], + "blocks": [ + 
{ + "bbox": [ + 313, + 335, + 402, + 425 + ], + "lines": [ + { + "bbox": [ + 313, + 335, + 402, + 425 + ], + "spans": [ + { + "bbox": [ + 313, + 335, + 402, + 425 + ], + "type": "image", + "image_path": "e19a526ae728ef205a6d09c27b250ea5bcb7433bc30ff0fe90075b9fa80b7e50.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 411, + 335, + 498, + 424 + ], + "blocks": [ + { + "bbox": [ + 411, + 335, + 498, + 424 + ], + "lines": [ + { + "bbox": [ + 411, + 335, + 498, + 424 + ], + "spans": [ + { + "bbox": [ + 411, + 335, + 498, + 424 + ], + "type": "image", + "image_path": "36a0b0bce8ab290695f007ef9c9240f85222009951f43efd16512c909c48a75e.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 110, + 426, + 427, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 426, + 427, + 438 + ], + "spans": [ + { + "bbox": [ + 110, + 426, + 427, + 438 + ], + "type": "text", + "content": "Input Text: \"Please generate the panoptic segmentation result of the image.\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 110, + 443, + 200, + 535 + ], + "blocks": [ + { + "bbox": [ + 110, + 443, + 200, + 535 + ], + "lines": [ + { + "bbox": [ + 110, + 443, + 200, + 535 + ], + "spans": [ + { + "bbox": [ + 110, + 443, + 200, + 535 + ], + "type": "image", + "image_path": "a3692ab260876169e43b10f0a13b3e0a2358a469179695d6391b7121a291f656.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 211, + 443, + 299, + 534 + ], + "blocks": [ + { + "bbox": [ + 211, + 443, + 299, + 534 + ], + "lines": [ + { + "bbox": [ + 211, + 443, + 299, + 534 + ], + "spans": [ + { + "bbox": [ + 211, + 443, + 299, + 534 + ], + "type": "image", + "image_path": "04d61817b36efe3075ecd5a5ab06c82b578abc68918b2566f24ba1ac9e539e1b.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 312, + 444, + 401, + 535 + ], + "blocks": [ + { + "bbox": [ + 312, + 444, + 401, + 535 + ], + "lines": [ + { + "bbox": [ + 312, + 444, + 401, + 535 + ], + "spans": [ + { + "bbox": [ + 312, + 444, + 401, + 535 + ], + "type": "image", + "image_path": "76af15fc690cc0e7fcb7cc802c4e12a650c1bda8daa750afc68a7050560ab699.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 411, + 445, + 498, + 535 + ], + "blocks": [ + { + "bbox": [ + 411, + 445, + 498, + 535 + ], + "lines": [ + { + "bbox": [ + 411, + 445, + 498, + 535 + ], + "spans": [ + { + "bbox": [ + 411, + 445, + 498, + 535 + ], + "type": "image", + "image_path": "f550448a4b54f7ddaffc6aa7b44e8f05342e71009989a87f527ce649b3de480a.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 115, + 536, + 432, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 536, + 432, + 548 + ], + "spans": [ + { + "bbox": [ + 115, + 536, + 432, + 548 + ], + "type": "text", + "content": "Input Text: \"Please generate the panoptic segmentation result of the image.\"" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 110, + 552, + 200, + 639 + ], + "blocks": [ + { + "bbox": [ + 110, + 552, + 200, + 639 + ], + "lines": [ + { + "bbox": [ + 110, + 552, + 200, + 639 + ], + "spans": [ + { + 
"bbox": [ + 110, + 552, + 200, + 639 + ], + "type": "image", + "image_path": "7ea2069c70699c15434b740761c5c8214d80002c7982c766b3b5d6af3f7b2f9d.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 689, + 533, + 733 + ], + "lines": [ + { + "bbox": [ + 77, + 689, + 533, + 733 + ], + "spans": [ + { + "bbox": [ + 77, + 689, + 533, + 733 + ], + "type": "text", + "content": "Figure 49: Task: Image to X: Panoptic segmentation. Evaluate the shape and grouping ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and K-Net [123]. Observation: GPT-4o can understand the panoptic segmentation task, while Gemini 2.0 Flash cannot do this task in the first and third cases." + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 211, + 551, + 299, + 639 + ], + "blocks": [ + { + "bbox": [ + 211, + 551, + 299, + 639 + ], + "lines": [ + { + "bbox": [ + 211, + 551, + 299, + 639 + ], + "spans": [ + { + "bbox": [ + 211, + 551, + 299, + 639 + ], + "type": "image", + "image_path": "7a80ead376e4c816f64243eb12d3db5695241cc5988afc361964743b675d9e97.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 313, + 552, + 401, + 639 + ], + "blocks": [ + { + "bbox": [ + 313, + 552, + 401, + 639 + ], + "lines": [ + { + "bbox": [ + 313, + 552, + 401, + 639 + ], + "spans": [ + { + "bbox": [ + 313, + 552, + 401, + 639 + ], + "type": "image", + "image_path": "d1b34b2ffc2bfa4c9301c7d1c2ce46deef595c8d9d33f80db8a4a3538f6a89c5.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 411, + 552, + 498, + 639 + ], + "blocks": [ + { + "bbox": [ + 411, + 552, + 498, + 639 + ], + "lines": [ + { + "bbox": [ + 411, + 552, + 498, + 639 + ], + "spans": [ + { + "bbox": [ + 411, + 552, + 498, + 639 + ], + "type": "image", + "image_path": "2e8e24d1cba72b5e1d3fa71c6329231f5130c1a2c84e08710a2bfcef26d50f3e.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "bbox": [ + 115, + 641, + 432, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 641, + 432, + 653 + ], + "spans": [ + { + "bbox": [ + 115, + 641, + 432, + 653 + ], + "type": "text", + "content": "Input Text: \"Please generate the panoptic segmentation result of the image.\"" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 129, + 659, + 181, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 659, + 181, + 670 + ], + "spans": [ + { + "bbox": [ + 129, + 659, + 181, + 670 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 241, + 659, + 271, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 659, + 271, + 669 + ], + "spans": [ + { + "bbox": [ + 241, + 659, + 271, + 669 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 322, + 659, + 390, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 659, + 390, + 669 + ], + "spans": [ + { + "bbox": [ + 322, + 659, + 390, + 669 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 443, + 659, + 470, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 443, + 
659, + 470, + 669 + ], + "spans": [ + { + "bbox": [ + 443, + 659, + 470, + 669 + ], + "type": "text", + "content": "K-Net" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 79, + 186, + 91 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 79, + 186, + 91 + ], + "spans": [ + { + "bbox": [ + 129, + 79, + 186, + 91 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "65" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 64 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 176, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 176, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 176, + 83 + ], + "type": "text", + "content": "2.4.2 Edge Detection" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 533, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 533, + 137 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 533, + 137 + ], + "type": "text", + "content": "Edge Detection. As a classic vision task, edge detection aims to identify the boundaries or edges of objects within an image. These edges represent the locations with significant changes in image intensity, color, or other visual features. Common edge detection operators include the Sobel, Prewitt, and Canny operators. Recent works adopt deep learning-based approaches." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 140, + 533, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 140, + 533, + 196 + ], + "spans": [ + { + "bbox": [ + 77, + 140, + 533, + 196 + ], + "type": "text", + "content": "In Figure 50, we compare this ability with a recent SOTA deep learning-based approach, EDMB [56]. Across the four examples, we find that both GPT-4o and Gemini 2.0 Flash can detect object edges for both foreground and background objects. In addition, the details produced by GPT-4o are surprisingly good. We find two critical issues: 1) The spatial localization of GPT-4o is changed, as also observed in the segmentation tasks. 2) The image content is also changed by GPT-4o. In the first example, for instance, a road that does not exist in the input image is generated." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 200, + 533, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 200, + 533, + 245 + ], + "spans": [ + { + "bbox": [ + 77, + 200, + 533, + 245 + ], + "type": "text", + "content": "Image Matting. Image matting is a technique in image processing that aims to separate a foreground object from its background and obtain a detailed alpha matte, which indicates the transparency or opacity of each pixel in the foreground. It goes beyond simple segmentation by providing more precise information about the boundaries and fine details of the object, especially for complex objects like hair or smoke." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 249, + 533, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 249, + 533, + 316 + ], + "spans": [ + { + "bbox": [ + 77, + 249, + 533, + 316 + ], + "type": "text", + "content": "In Figure 51, we show three testing examples, with one expert model, Matting Anything [53]. Compared with Gemini, GPT-4o can handle the simple cases, as shown in the third row. Thus, it can understand the task goal. For example, it can even keep the fine-grained details of a horse hair. However, considering the strict requirements of image matting (fine-grained and aligned details), the overall quality is bad. Compared with Matting Anything, both GPT-4o and Gemini work poorly. We find nearly the same issues: 1) Wrong spatial localization, 2) Changed contents." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "66" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 65 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 176, + 179, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 176, + 179, + 190 + ], + "spans": [ + { + "bbox": [ + 120, + 176, + 179, + 190 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 205, + 200, + 419, + 214 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 200, + 419, + 214 + ], + "spans": [ + { + "bbox": [ + 205, + 200, + 419, + 214 + ], + "type": "text", + "content": "Evaluation: Edge Detection, Shape Analysis." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 89, + 219, + 200, + 293 + ], + "blocks": [ + { + "bbox": [ + 89, + 219, + 200, + 293 + ], + "lines": [ + { + "bbox": [ + 89, + 219, + 200, + 293 + ], + "spans": [ + { + "bbox": [ + 89, + 219, + 200, + 293 + ], + "type": "image", + "image_path": "0cfb348c56fe1101617d57441e326b36eadf434565b6c1880650127baaf8224e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 200, + 220, + 294, + 293 + ], + "blocks": [ + { + "bbox": [ + 200, + 220, + 294, + 293 + ], + "lines": [ + { + "bbox": [ + 200, + 220, + 294, + 293 + ], + "spans": [ + { + "bbox": [ + 200, + 220, + 294, + 293 + ], + "type": "image", + "image_path": "7bf3e8e8fcd88c5d02dc116f91c2aa8891be20f22eda6960e71e5c24543006c7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 294, + 220, + 406, + 293 + ], + "blocks": [ + { + "bbox": [ + 294, + 220, + 406, + 293 + ], + "lines": [ + { + "bbox": [ + 294, + 220, + 406, + 293 + ], + "spans": [ + { + "bbox": [ + 294, + 220, + 406, + 293 + ], + "type": "image", + "image_path": "d8de1fb96772ae2a9a2c7fa60dfee2b9c9838cdd7633d066debcab1d110abac9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 406, + 219, + 516, + 293 + ], + "blocks": [ + { + "bbox": [ + 406, + 219, + 516, + 293 + ], + "lines": [ + { + "bbox": [ + 406, + 219, + 516, + 293 + ], + "spans": [ + { + "bbox": [ + 406, + 219, + 516, + 293 + ], + "type": "image", + "image_path": "c57774706ddf76b7eafe1bc39859e9e41d2e9be8a2ed40d9b69c030d906ac930.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 99, + 301, + 479, + 315 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 301, + 479, + 315 + ], + "spans": [ + { + "bbox": [ + 99, + 301, + 479, + 315 + ], + "type": "text", + "content": "Input Text: \"Please detect the edge of object in this image and output the final image.\"" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 88, + 323, + 201, + 399 + ], + "blocks": [ + { + "bbox": [ + 88, + 323, + 201, + 399 + ], + "lines": [ + { + "bbox": [ + 88, + 323, + 201, + 399 + ], + "spans": [ + { + "bbox": [ + 88, + 323, + 201, + 399 + ], + "type": "image", + "image_path": "35081435a9dddead08f2e1591ed883e214e7ed45a52ef1bb2df34789a8f65f50.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 588, + 532, + 623 + ], + "lines": [ + { + "bbox": [ + 77, + 588, + 532, + 623 + ], + "spans": [ + { + "bbox": [ + 77, + 588, + 532, + 623 + ], + "type": "text", + "content": "Figure 50: Task: Image to X: Edge detection. Evaluate the shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and EDMB [56]. Observation: We find both GPT-4o and Gemini 2.0 Flash can detect object edges for both foreground and background objects." 
+ } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 201, + 323, + 282, + 399 + ], + "blocks": [ + { + "bbox": [ + 201, + 323, + 282, + 399 + ], + "lines": [ + { + "bbox": [ + 201, + 323, + 282, + 399 + ], + "spans": [ + { + "bbox": [ + 201, + 323, + 282, + 399 + ], + "type": "image", + "image_path": "61a9787b52f106410ced18bbf723e46ef2d3db614a1b9796dd588397ad04206e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 282, + 323, + 397, + 400 + ], + "blocks": [ + { + "bbox": [ + 282, + 323, + 397, + 400 + ], + "lines": [ + { + "bbox": [ + 282, + 323, + 397, + 400 + ], + "spans": [ + { + "bbox": [ + 282, + 323, + 397, + 400 + ], + "type": "image", + "image_path": "a0ebad62eca5fc477e0815751adacf1e46b3abcdce6c306101ecb96b7043a8c4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 400, + 323, + 517, + 401 + ], + "blocks": [ + { + "bbox": [ + 400, + 323, + 517, + 401 + ], + "lines": [ + { + "bbox": [ + 400, + 323, + 517, + 401 + ], + "spans": [ + { + "bbox": [ + 400, + 323, + 517, + 401 + ], + "type": "image", + "image_path": "425b5ab97efea03f5a9f305b7eb62c794a077f423e347f1fbc1c194faca7ed85.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 99, + 403, + 479, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 403, + 479, + 415 + ], + "spans": [ + { + "bbox": [ + 99, + 403, + 479, + 415 + ], + "type": "text", + "content": "Input Text: \"Please detect the edge of object in this image and output the final image.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 88, + 416, + 194, + 522 + ], + "blocks": [ + { + "bbox": [ + 88, + 416, + 194, + 522 + ], + "lines": [ + { + "bbox": [ + 88, + 416, + 194, + 522 + ], + "spans": [ + { + "bbox": [ + 88, + 416, + 194, + 522 + ], + "type": "image", + "image_path": "63d21e13c3a1a35a1fd1c9cedc021755050cc1398455173eaf4da3af031229aa.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 194, + 415, + 299, + 522 + ], + "blocks": [ + { + "bbox": [ + 194, + 415, + 299, + 522 + ], + "lines": [ + { + "bbox": [ + 194, + 415, + 299, + 522 + ], + "spans": [ + { + "bbox": [ + 194, + 415, + 299, + 522 + ], + "type": "image", + "image_path": "1e393f9e4a534fd74efa40049b2b251a874ba84af7a4ec4b9bafb559308b436f.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 301, + 416, + 406, + 522 + ], + "blocks": [ + { + "bbox": [ + 301, + 416, + 406, + 522 + ], + "lines": [ + { + "bbox": [ + 301, + 416, + 406, + 522 + ], + "spans": [ + { + "bbox": [ + 301, + 416, + 406, + 522 + ], + "type": "image", + "image_path": "7af8979fc8ec37fdc42425294aa893ae45a15b13d8806f2f8c4eac75eae9952b.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 406, + 416, + 513, + 522 + ], + "blocks": [ + { + "bbox": [ + 406, + 416, + 513, + 522 + ], + "lines": [ + { + "bbox": [ + 406, + 416, + 513, + 522 + ], + "spans": [ + { + "bbox": [ + 406, + 416, + 513, + 522 + ], + "type": "image", + "image_path": "b41cebcb69ab1488c833010b4ba4c0e40ecd6d2c22ed7583995ccecfc50154af.jpg" + } + ] + } + ], + 
"index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 98, + 528, + 479, + 541 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 528, + 479, + 541 + ], + "spans": [ + { + "bbox": [ + 98, + 528, + 479, + 541 + ], + "type": "text", + "content": "Input Text: \"Please detect the edge of object in this image and output the final image.\"" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 552, + 169, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 552, + 169, + 565 + ], + "spans": [ + { + "bbox": [ + 113, + 552, + 169, + 565 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 231, + 552, + 264, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 552, + 264, + 563 + ], + "spans": [ + { + "bbox": [ + 231, + 552, + 264, + 563 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 552, + 389, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 552, + 389, + 563 + ], + "spans": [ + { + "bbox": [ + 317, + 552, + 389, + 563 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 445, + 552, + 473, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 445, + 552, + 473, + 563 + ], + "spans": [ + { + "bbox": [ + 445, + 552, + 473, + 563 + ], + "type": "text", + "content": "EDMB" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "67" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 66 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 149, + 179, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 149, + 179, + 163 + ], + "spans": [ + { + "bbox": [ + 120, + 149, + 179, + 163 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 179, + 171, + 192, + 184 + ], + "blocks": [ + { + "bbox": [ + 179, + 171, + 192, + 184 + ], + "lines": [ + { + "bbox": [ + 179, + 171, + 192, + 184 + ], + "spans": [ + { + "bbox": [ + 179, + 171, + 192, + 184 + ], + "type": "image", + "image_path": "7ee4ab20360492c6594cd85f6e0806bdd6ac326a6b9f951620d2d3e338cdc66e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 192, + 173, + 431, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 173, + 431, + 186 + ], + "spans": [ + { + "bbox": [ + 192, + 173, + 431, + 186 + ], + "type": "text", + "content": "Evaluation: Image Matting, Grouping and Shape." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 101, + 193, + 196, + 287 + ], + "blocks": [ + { + "bbox": [ + 101, + 193, + 196, + 287 + ], + "lines": [ + { + "bbox": [ + 101, + 193, + 196, + 287 + ], + "spans": [ + { + "bbox": [ + 101, + 193, + 196, + 287 + ], + "type": "image", + "image_path": "6e8e2c78c4556e12bb41898d5bd695dbc241b0124a7f8964bc5853e2c5f14dcc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 205, + 193, + 301, + 286 + ], + "blocks": [ + { + "bbox": [ + 205, + 193, + 301, + 286 + ], + "lines": [ + { + "bbox": [ + 205, + 193, + 301, + 286 + ], + "spans": [ + { + "bbox": [ + 205, + 193, + 301, + 286 + ], + "type": "image", + "image_path": "d4c547c6a3d707292ee701dab81a3f08897230cfc304d056c77c1b62ede0a10e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 193, + 405, + 286 + ], + "blocks": [ + { + "bbox": [ + 310, + 193, + 405, + 286 + ], + "lines": [ + { + "bbox": [ + 310, + 193, + 405, + 286 + ], + "spans": [ + { + "bbox": [ + 310, + 193, + 405, + 286 + ], + "type": "image", + "image_path": "4bad73d42a2bdd07f8a75924ab1f205dab5f3e1919f244ff0bab551eb3c0c55d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 417, + 193, + 511, + 286 + ], + "blocks": [ + { + "bbox": [ + 417, + 193, + 511, + 286 + ], + "lines": [ + { + "bbox": [ + 417, + 193, + 511, + 286 + ], + "spans": [ + { + "bbox": [ + 417, + 193, + 511, + 286 + ], + "type": "image", + "image_path": "38e332ce3a2562225f948c109c071db065296fc2fbfae678e295cb014e1ab37d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 99, + 289, + 508, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 289, + 508, + 312 + ], + "spans": [ + { + "bbox": [ + 99, + 289, + 508, + 312 + ], + "type": "text", + "content": "Input Text: \"Please Please matting the foreground and remove the background. Please directly generate the output image.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 101, + 315, + 196, + 410 + ], + "blocks": [ + { + "bbox": [ + 101, + 315, + 196, + 410 + ], + "lines": [ + { + "bbox": [ + 101, + 315, + 196, + 410 + ], + "spans": [ + { + "bbox": [ + 101, + 315, + 196, + 410 + ], + "type": "image", + "image_path": "f36d385ab421fbf6d5de21b9a66c5a418a213e03c378e1602b8431ffa1b78dec.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 594, + 532, + 649 + ], + "lines": [ + { + "bbox": [ + 77, + 594, + 532, + 649 + ], + "spans": [ + { + "bbox": [ + 77, + 594, + 532, + 649 + ], + "type": "text", + "content": "Figure 51: Task: Image to X: Image matting. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Matting Anything [54]. Observation: Compared with Gemini, GPT-4o can handle the simple cases, as shown in the third row. However, considering the strict requirements of image matting (fine-grained and aligned details), the overall quality is bad." 
+ } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 205, + 315, + 293, + 409 + ], + "blocks": [ + { + "bbox": [ + 205, + 315, + 293, + 409 + ], + "lines": [ + { + "bbox": [ + 205, + 315, + 293, + 409 + ], + "spans": [ + { + "bbox": [ + 205, + 315, + 293, + 409 + ], + "type": "image", + "image_path": "288f52547eff2bdfad27eac3648ba1b7a88af6beceffdce0913c9495832f57fb.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 315, + 406, + 409 + ], + "blocks": [ + { + "bbox": [ + 310, + 315, + 406, + 409 + ], + "lines": [ + { + "bbox": [ + 310, + 315, + 406, + 409 + ], + "spans": [ + { + "bbox": [ + 310, + 315, + 406, + 409 + ], + "type": "image", + "image_path": "301c1384224e00b3fa898587c4f5578363c3b8189cb4bd2ba2d363bd9d31a9c8.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 416, + 315, + 510, + 409 + ], + "blocks": [ + { + "bbox": [ + 416, + 315, + 510, + 409 + ], + "lines": [ + { + "bbox": [ + 416, + 315, + 510, + 409 + ], + "spans": [ + { + "bbox": [ + 416, + 315, + 510, + 409 + ], + "type": "image", + "image_path": "b95a63199f82c29f9f6a1d717b00f28bde57c3fa11bf2de4c55eab61642dcf7e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 100, + 410, + 509, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 410, + 509, + 432 + ], + "spans": [ + { + "bbox": [ + 100, + 410, + 509, + 432 + ], + "type": "text", + "content": "Input Text: \"Please Please matting the foreground and remove the background. Please directly generate the output image.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 101, + 435, + 194, + 530 + ], + "blocks": [ + { + "bbox": [ + 101, + 435, + 194, + 530 + ], + "lines": [ + { + "bbox": [ + 101, + 435, + 194, + 530 + ], + "spans": [ + { + "bbox": [ + 101, + 435, + 194, + 530 + ], + "type": "image", + "image_path": "e6a106a5ad2b6d2b4ff973ae0d66eb902d883da385dea0f75e0dbef3aaddd4b0.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 205, + 441, + 306, + 522 + ], + "blocks": [ + { + "bbox": [ + 205, + 441, + 306, + 522 + ], + "lines": [ + { + "bbox": [ + 205, + 441, + 306, + 522 + ], + "spans": [ + { + "bbox": [ + 205, + 441, + 306, + 522 + ], + "type": "image", + "image_path": "81b21df800b94c90849050cd7c2fb977ff2ab50081bab2870fe2b7f18fd3d602.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 310, + 435, + 403, + 529 + ], + "blocks": [ + { + "bbox": [ + 310, + 435, + 403, + 529 + ], + "lines": [ + { + "bbox": [ + 310, + 435, + 403, + 529 + ], + "spans": [ + { + "bbox": [ + 310, + 435, + 403, + 529 + ], + "type": "image", + "image_path": "6af19de892cfb5e57fdbd96c732a8b33020d3b991fab34b219b07d9804454a8f.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 416, + 436, + 510, + 530 + ], + "blocks": [ + { + "bbox": [ + 416, + 436, + 510, + 530 + ], + "lines": [ + { + "bbox": [ + 416, + 436, + 510, + 530 + ], + "spans": [ + { + "bbox": [ + 416, + 436, + 510, + 530 + ], + "type": "image", + "image_path": 
"851db8a91140cd33caf2d0e7e2e00637b208f9f9b4690e97f12ffd0fbb726feb.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 100, + 531, + 508, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 531, + 508, + 554 + ], + "spans": [ + { + "bbox": [ + 100, + 531, + 508, + 554 + ], + "type": "text", + "content": "Input Text: \"Please Please matting the foreground and remove the background. Please directly generate the output image.\"" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 119, + 557, + 175, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 557, + 175, + 570 + ], + "spans": [ + { + "bbox": [ + 119, + 557, + 175, + 570 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 237, + 557, + 269, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 557, + 269, + 567 + ], + "spans": [ + { + "bbox": [ + 237, + 557, + 269, + 567 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 323, + 557, + 394, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 557, + 394, + 567 + ], + "spans": [ + { + "bbox": [ + 323, + 557, + 394, + 567 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 445, + 551, + 483, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 445, + 551, + 483, + 574 + ], + "spans": [ + { + "bbox": [ + 445, + 551, + 483, + 574 + ], + "type": "text", + "content": "Matting Anything" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "68" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 67 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 173, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 173, + 84 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 173, + 84 + ], + "type": "text", + "content": "2.4.3 Salient Object" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 533, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 533, + 114 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 533, + 114 + ], + "type": "text", + "content": "Salient Object Detection. Salient object detection is a crucial technique in the field of computer vision and image processing. It aims to identify and locate the most visually prominent objects within an image or a video sequence." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 118, + 531, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 118, + 531, + 185 + ], + "spans": [ + { + "bbox": [ + 77, + 118, + 531, + 185 + ], + "type": "text", + "content": "In Figure 52, we adopt one expert model, BiRefNet [127], as reference. For all examples, compared with Gemini 2.0 Flash, GPT-4o can detect relevant salient objects with the text prompts while Gemini can not achieve this. The second example shows that the GPT-4o can generate the aligned salient masks. However, for other examples, the spatial location is not changed where the results are generated according to the input image and potential classes. 
In the last examples, GPT-4o cannot generate multiple salient object masks, which is also a limitation when dealing with multiple objects." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 190, + 531, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 190, + 531, + 213 + ], + "spans": [ + { + "bbox": [ + 77, + 190, + 531, + 213 + ], + "type": "text", + "content": "Mirror Detection. Mirror detection is a task in computer vision that focuses on identifying mirror surfaces within an image or a scene. Previous works explore this direction by adopting visual cues and geometric cues." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 217, + 531, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 217, + 531, + 283 + ], + "spans": [ + { + "bbox": [ + 77, + 217, + 531, + 283 + ], + "type": "text", + "content": "In Figure 53, we also explore this ability for both GPT-4o and Gemini 2.0 Flash. As for comparison, we adopt a recent SOTA expert model, VMD [107]. For simple cases, we find that GPT-4o can carry out mirror detection, as shown in the first example. For the complex scene, it cannot work as well as the expert model, VMD. As shown in the second example, it generates a fake mirror and leads to a wrong image output with a line to mark the boundaries of the fake mirror. As shown in the last row, GPT-4o treats several rectangular objects as mirrors, leading to several false positive examples." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 288, + 531, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 288, + 531, + 321 + ], + "spans": [ + { + "bbox": [ + 77, + 288, + 531, + 321 + ], + "type": "text", + "content": "**Shadow Detection.** Shadow detection is a significant process in computer vision and image processing that aims to identify and localize shadow regions in an image or a video. This technique is crucial, as shadows can otherwise disrupt object detection, recognition, and scene analysis." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 77, + 326, + 531, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 326, + 531, + 381 + ], + "spans": [ + { + "bbox": [ + 77, + 326, + 531, + 381 + ], + "type": "text", + "content": "In Figure 54, we compare and test this ability for GPT-4o. We adopt the SOTA model, SDDNet [21] for reference. For the simple examples (single objects and no objects in the image), both GPT-4o and Gemini can localize the shadow, as shown in the first two rows. For more complex examples, both models detect both objects and their shadows with one mask output, as shown in the last two rows. Thus, GPT-4o cannot handle these inputs. In addition, the spatial misalignments also happen for all the cases." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 77, + 386, + 531, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 386, + 531, + 430 + ], + "spans": [ + { + "bbox": [ + 77, + 386, + 531, + 430 + ], + "type": "text", + "content": "Camouflage Object Detection. Camouflage object detection is a challenging task in computer vision. It aims to identify objects that are designed to blend into their backgrounds, making them difficult to distinguish by human eyes or traditional detection methods. This has a wide application for the military, security, and wildlife conservation." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 77, + 434, + 531, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 434, + 531, + 491 + ], + "spans": [ + { + "bbox": [ + 77, + 434, + 531, + 491 + ], + "type": "text", + "content": "As shown in Figure 55, we also include one expert model, BiRefNet [127], for reference. For all examples, both GPT-4o and Gemini 2.0 Flash can detect and segment the camouflaged animals in simple cases, as shown in the last two rows. GPT-4o can also detect the specific object, given the text prompt, as shown in the first row. However, the same misalignment issues still exist. In addition, it also mixes segmentation-map formats (binary masks and color masks), as shown in the last row." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "69" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 68 + }, + { + "para_blocks": [ + { + "bbox": [ + 123, + 82, + 182, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 82, + 182, + 95 + ], + "spans": [ + { + "bbox": [ + 123, + 82, + 182, + 95 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 156, + 106, + 167, + 120 + ], + "blocks": [ + { + "bbox": [ + 156, + 106, + 167, + 120 + ], + "lines": [ + { + "bbox": [ + 156, + 106, + 167, + 120 + ], + "spans": [ + { + "bbox": [ + 156, + 106, + 167, + 120 + ], + "type": "image", + "image_path": "35b326fb2148865594e763630192efc498f76114d6db2844fc81e1f5810bf287.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 170, + 110, + 454, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 110, + 454, + 123 + ], + "spans": [ + { + "bbox": [ + 170, + 110, + 454, + 123 + ], + "type": "text", + "content": "Evaluation: Salient Object Detection, Grouping and Shape."
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 95, + 129, + 199, + 206 + ], + "blocks": [ + { + "bbox": [ + 95, + 129, + 199, + 206 + ], + "lines": [ + { + "bbox": [ + 95, + 129, + 199, + 206 + ], + "spans": [ + { + "bbox": [ + 95, + 129, + 199, + 206 + ], + "type": "image", + "image_path": "2632b121866020ffde6bdceee821e4d81430159476d96e0e01139dc9d082ba70.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 202, + 129, + 305, + 206 + ], + "blocks": [ + { + "bbox": [ + 202, + 129, + 305, + 206 + ], + "lines": [ + { + "bbox": [ + 202, + 129, + 305, + 206 + ], + "spans": [ + { + "bbox": [ + 202, + 129, + 305, + 206 + ], + "type": "image", + "image_path": "2cef513138676cfd0753561a3373c26db0d5be094be17da19006ce2882bdbd13.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 309, + 129, + 413, + 207 + ], + "blocks": [ + { + "bbox": [ + 309, + 129, + 413, + 207 + ], + "lines": [ + { + "bbox": [ + 309, + 129, + 413, + 207 + ], + "spans": [ + { + "bbox": [ + 309, + 129, + 413, + 207 + ], + "type": "image", + "image_path": "f1f8095ce03c55845db1331630f40c1812ffda931409ba5f89385376c17bfa67.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 417, + 129, + 520, + 207 + ], + "blocks": [ + { + "bbox": [ + 417, + 129, + 520, + 207 + ], + "lines": [ + { + "bbox": [ + 417, + 129, + 520, + 207 + ], + "spans": [ + { + "bbox": [ + 417, + 129, + 520, + 207 + ], + "type": "image", + "image_path": "7ba0cc1081bd824ad107543a5dbe34527a4105ea1c1e9c55a5359263cccada3f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 100, + 209, + 502, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 209, + 502, + 232 + ], + "spans": [ + { + "bbox": [ + 100, + 209, + 502, + 232 + ], + "type": "text", + "content": "Input Text: \"Give me the segmentation map of the most salient objects in this image. 
Return resulting image by using image generation.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 96, + 232, + 198, + 314 + ], + "blocks": [ + { + "bbox": [ + 96, + 232, + 198, + 314 + ], + "lines": [ + { + "bbox": [ + 96, + 232, + 198, + 314 + ], + "spans": [ + { + "bbox": [ + 96, + 232, + 198, + 314 + ], + "type": "image", + "image_path": "6076be77d027d41534f7c4a4fd02e93499c2b66c1cccf37db5b69d7f522c9268.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 202, + 232, + 304, + 314 + ], + "blocks": [ + { + "bbox": [ + 202, + 232, + 304, + 314 + ], + "lines": [ + { + "bbox": [ + 202, + 232, + 304, + 314 + ], + "spans": [ + { + "bbox": [ + 202, + 232, + 304, + 314 + ], + "type": "image", + "image_path": "17e080b72e8be225cacc760b2d20b9fc982e4786f8527d6dc2ca0c3cbfbee8d2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 232, + 412, + 314 + ], + "blocks": [ + { + "bbox": [ + 309, + 232, + 412, + 314 + ], + "lines": [ + { + "bbox": [ + 309, + 232, + 412, + 314 + ], + "spans": [ + { + "bbox": [ + 309, + 232, + 412, + 314 + ], + "type": "image", + "image_path": "8f405db10bccda82cc90e5b0887cf8f372d62c3308cf2e2f187d9b3c87f37919.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 417, + 232, + 520, + 314 + ], + "blocks": [ + { + "bbox": [ + 417, + 232, + 520, + 314 + ], + "lines": [ + { + "bbox": [ + 417, + 232, + 520, + 314 + ], + "spans": [ + { + "bbox": [ + 417, + 232, + 520, + 314 + ], + "type": "image", + "image_path": "a34351a4399d5f36a740b505bcdf6d1791a25970e7e86a6c65bfafedbf3d931e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 100, + 316, + 503, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 316, + 503, + 338 + ], + "spans": [ + { + "bbox": [ + 100, + 316, + 503, + 338 + ], + "type": "text", + "content": "Input Text: \"Give me the segmentation map of the most salient objects in this image. Return resulting image by using image generation.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 96, + 339, + 197, + 407 + ], + "blocks": [ + { + "bbox": [ + 96, + 339, + 197, + 407 + ], + "lines": [ + { + "bbox": [ + 96, + 339, + 197, + 407 + ], + "spans": [ + { + "bbox": [ + 96, + 339, + 197, + 407 + ], + "type": "image", + "image_path": "8619a8168756037dbe0d396ebd1fa868b9e5eebb02bfe0bc848ba11f025e59c5.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 576, + 533, + 620 + ], + "lines": [ + { + "bbox": [ + 77, + 576, + 533, + 620 + ], + "spans": [ + { + "bbox": [ + 77, + 576, + 533, + 620 + ], + "type": "text", + "content": "Figure 52: Task: Image to X: Salient object detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and BiRefNet [127]. Observation: For all examples, GPT-4o can detect relevant salient objects with the text prompts, while Gemini cannot achieve this function."
+ } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 203, + 339, + 304, + 407 + ], + "blocks": [ + { + "bbox": [ + 203, + 339, + 304, + 407 + ], + "lines": [ + { + "bbox": [ + 203, + 339, + 304, + 407 + ], + "spans": [ + { + "bbox": [ + 203, + 339, + 304, + 407 + ], + "type": "image", + "image_path": "11fb36737f5588598b2c288b162d49548d2524ca0fb7dcafb49bbd7f1ef163a6.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 309, + 339, + 412, + 407 + ], + "blocks": [ + { + "bbox": [ + 309, + 339, + 412, + 407 + ], + "lines": [ + { + "bbox": [ + 309, + 339, + 412, + 407 + ], + "spans": [ + { + "bbox": [ + 309, + 339, + 412, + 407 + ], + "type": "image", + "image_path": "8fcfecbfee8e3af8ee12f1909d0f8d6442457efaad84be98a5ae90af97d909d1.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 417, + 339, + 520, + 407 + ], + "blocks": [ + { + "bbox": [ + 417, + 339, + 520, + 407 + ], + "lines": [ + { + "bbox": [ + 417, + 339, + 520, + 407 + ], + "spans": [ + { + "bbox": [ + 417, + 339, + 520, + 407 + ], + "type": "image", + "image_path": "689143eb07d759080356a1acc014ea1f91236ddde377299fa31b78f9f34ed9bc.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 100, + 409, + 502, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 409, + 502, + 432 + ], + "spans": [ + { + "bbox": [ + 100, + 409, + 502, + 432 + ], + "type": "text", + "content": "Input Text: \"Give me the segmentation map of the most salient objects in this image. 
Return resulting image by using image generation.\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 96, + 433, + 197, + 509 + ], + "blocks": [ + { + "bbox": [ + 96, + 433, + 197, + 509 + ], + "lines": [ + { + "bbox": [ + 96, + 433, + 197, + 509 + ], + "spans": [ + { + "bbox": [ + 96, + 433, + 197, + 509 + ], + "type": "image", + "image_path": "fd89b62d113f6d9cde9f7af5c15f0c3435fb8051b9a81f77a0195174a31e7771.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 203, + 433, + 305, + 509 + ], + "blocks": [ + { + "bbox": [ + 203, + 433, + 305, + 509 + ], + "lines": [ + { + "bbox": [ + 203, + 433, + 305, + 509 + ], + "spans": [ + { + "bbox": [ + 203, + 433, + 305, + 509 + ], + "type": "image", + "image_path": "53c762f53aba99798ebe7781772fd8e13f36de30319f71548adb22664211cf14.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 310, + 433, + 412, + 509 + ], + "blocks": [ + { + "bbox": [ + 310, + 433, + 412, + 509 + ], + "lines": [ + { + "bbox": [ + 310, + 433, + 412, + 509 + ], + "spans": [ + { + "bbox": [ + 310, + 433, + 412, + 509 + ], + "type": "image", + "image_path": "c0db26c61074cc97eac7d00434065dec4cfdf52edd3efc8af38cd021e086e4d3.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 417, + 433, + 520, + 509 + ], + "blocks": [ + { + "bbox": [ + 417, + 433, + 520, + 509 + ], + "lines": [ + { + "bbox": [ + 417, + 433, + 520, + 509 + ], + "spans": [ + { + "bbox": [ + 417, + 433, + 520, + 509 + ], + "type": "image", + "image_path": "6b703d57fd8284c322132b56c18ad285d5a472bca055f06fdf9ddb620574ba3c.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 100, + 510, + 502, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 510, + 502, + 535 + ], + "spans": [ + { + "bbox": [ + 100, + 510, + 502, + 535 + ], + "type": "text", + "content": "Input Text: \"Give me the segmentation map of the most salient objects in this image. 
Return resulting image by using image generation.\"" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 120, + 540, + 176, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 540, + 176, + 552 + ], + "spans": [ + { + "bbox": [ + 120, + 540, + 176, + 552 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 238, + 540, + 271, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 540, + 271, + 549 + ], + "spans": [ + { + "bbox": [ + 238, + 540, + 271, + 549 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 324, + 540, + 397, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 540, + 397, + 550 + ], + "spans": [ + { + "bbox": [ + 324, + 540, + 397, + 550 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 446, + 540, + 488, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 446, + 540, + 488, + 550 + ], + "spans": [ + { + "bbox": [ + 446, + 540, + 488, + 550 + ], + "type": "text", + "content": "BiRefNet" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "70" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 69 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 171, + 179, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 171, + 179, + 183 + ], + "spans": [ + { + "bbox": [ + 120, + 171, + 179, + 183 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 175, + 191, + 188, + 205 + ], + "blocks": [ + { + "bbox": [ + 175, + 191, + 188, + 205 + ], + "lines": [ + { + "bbox": [ + 175, + 191, + 188, + 205 + ], + "spans": [ + { + "bbox": [ + 175, + 191, + 188, + 205 + ], + "type": "image", + "image_path": "f93d94cd33ec493cea22f6299c05435ef6df644e620ad03fc4906616201dd94b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 189, + 194, + 436, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 194, + 436, + 208 + ], + "spans": [ + { + "bbox": [ + 189, + 194, + 436, + 208 + ], + "type": "text", + "content": "Evaluation: Mirror Detection, Grouping and Shape."
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 101, + 214, + 194, + 307 + ], + "blocks": [ + { + "bbox": [ + 101, + 214, + 194, + 307 + ], + "lines": [ + { + "bbox": [ + 101, + 214, + 194, + 307 + ], + "spans": [ + { + "bbox": [ + 101, + 214, + 194, + 307 + ], + "type": "image", + "image_path": "314ecf8346962cc266f1e67d8f88be07f8a8eaca1ca9d18ed756eea986ca6a35.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 205, + 213, + 302, + 306 + ], + "blocks": [ + { + "bbox": [ + 205, + 213, + 302, + 306 + ], + "lines": [ + { + "bbox": [ + 205, + 213, + 302, + 306 + ], + "spans": [ + { + "bbox": [ + 205, + 213, + 302, + 306 + ], + "type": "image", + "image_path": "8de34136e3817eb2b912dec2f52ae54240a4d61a51e8f5d08695f60f906cb9b3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 309, + 213, + 405, + 307 + ], + "blocks": [ + { + "bbox": [ + 309, + 213, + 405, + 307 + ], + "lines": [ + { + "bbox": [ + 309, + 213, + 405, + 307 + ], + "spans": [ + { + "bbox": [ + 309, + 213, + 405, + 307 + ], + "type": "image", + "image_path": "3a21a982cd81980080fc0bfe53df60869cb5c77ea93c8479b28806aab9298ca1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 416, + 215, + 509, + 306 + ], + "blocks": [ + { + "bbox": [ + 416, + 215, + 509, + 306 + ], + "lines": [ + { + "bbox": [ + 416, + 215, + 509, + 306 + ], + "spans": [ + { + "bbox": [ + 416, + 215, + 509, + 306 + ], + "type": "image", + "image_path": "461d26dcff028be557e2efbcb1789125eb28fa946c8494b2dc6491aefc57fa30.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 100, + 310, + 515, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 310, + 515, + 323 + ], + "spans": [ + { + "bbox": [ + 100, + 310, + 515, + 323 + ], + "type": "text", + "content": "Input Text: \"Please segment all the mirror in the image and directly generate the output image.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 101, + 327, + 194, + 421 + ], + "blocks": [ + { + "bbox": [ + 101, + 327, + 194, + 421 + ], + "lines": [ + { + "bbox": [ + 101, + 327, + 194, + 421 + ], + "spans": [ + { + "bbox": [ + 101, + 327, + 194, + 421 + ], + "type": "image", + "image_path": "cf3a11167038e14a620917c62c66c24cffe74d0bace22efc3b2051879d1ac82a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 205, + 327, + 302, + 421 + ], + "blocks": [ + { + "bbox": [ + 205, + 327, + 302, + 421 + ], + "lines": [ + { + "bbox": [ + 205, + 327, + 302, + 421 + ], + "spans": [ + { + "bbox": [ + 205, + 327, + 302, + 421 + ], + "type": "image", + "image_path": "1bc679f32fbc6bb7c76fac345ab8d2e03f52eb51f303c9abdaaec3738e3b8348.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 327, + 405, + 421 + ], + "blocks": [ + { + "bbox": [ + 309, + 327, + 405, + 421 + ], + "lines": [ + { + "bbox": [ + 309, + 327, + 405, + 421 + ], + "spans": [ + { + "bbox": [ + 309, + 327, + 405, + 421 + ], + "type": "image", + "image_path": "dcde7c31e72b7a5f000f1f4ce3071419d9f67361a8d18ec4966c3bb5f4513ab0.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 10 + }, + { + "type": "image", + "bbox": [ + 416, + 327, + 509, + 421 + ], + "blocks": [ + { + "bbox": [ + 416, + 327, + 509, + 421 + ], + "lines": [ + { + "bbox": [ + 416, + 327, + 509, + 421 + ], + "spans": [ + { + "bbox": [ + 416, + 327, + 509, + 421 + ], + "type": "image", + "image_path": "83a5063b90e2d85ae8723b1c0527ebfeb79cf418f10b31ccefd38f2bc7514bf1.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 101, + 422, + 517, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 422, + 517, + 434 + ], + "spans": [ + { + "bbox": [ + 101, + 422, + 517, + 434 + ], + "type": "text", + "content": "Input Text: \"Please segment all the mirror in the image and directly generate the output image.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 101, + 439, + 193, + 533 + ], + "blocks": [ + { + "bbox": [ + 101, + 439, + 193, + 533 + ], + "lines": [ + { + "bbox": [ + 101, + 439, + 193, + 533 + ], + "spans": [ + { + "bbox": [ + 101, + 439, + 193, + 533 + ], + "type": "image", + "image_path": "00cde007c7ba440020df1e4e47694ace493a34ef86885baa498937144d1106a2.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 583, + 533, + 628 + ], + "lines": [ + { + "bbox": [ + 77, + 583, + 533, + 628 + ], + "spans": [ + { + "bbox": [ + 77, + 583, + 533, + 628 + ], + "type": "text", + "content": "Figure 53: Task: Image to X: Mirror detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and VMD [107]. Observation: For simple cases, we find that GPT-4o can carry out mirror detection, as shown in the first example. For the complex scene, it cannot work as well as VMD." 
+ } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 205, + 439, + 302, + 532 + ], + "blocks": [ + { + "bbox": [ + 205, + 439, + 302, + 532 + ], + "lines": [ + { + "bbox": [ + 205, + 439, + 302, + 532 + ], + "spans": [ + { + "bbox": [ + 205, + 439, + 302, + 532 + ], + "type": "image", + "image_path": "ce4d2858b6a29f44fe079438ac4f53fe674726328c863a6f0a045947d1adac83.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 309, + 439, + 403, + 532 + ], + "blocks": [ + { + "bbox": [ + 309, + 439, + 403, + 532 + ], + "lines": [ + { + "bbox": [ + 309, + 439, + 403, + 532 + ], + "spans": [ + { + "bbox": [ + 309, + 439, + 403, + 532 + ], + "type": "image", + "image_path": "1d6b2ece4089619c8dc3c3901314db44c609491a2174435fb009e552ab953e21.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 416, + 439, + 509, + 532 + ], + "blocks": [ + { + "bbox": [ + 416, + 439, + 509, + 532 + ], + "lines": [ + { + "bbox": [ + 416, + 439, + 509, + 532 + ], + "spans": [ + { + "bbox": [ + 416, + 439, + 509, + 532 + ], + "type": "image", + "image_path": "1a402093b931cc216473bf07b0f3c0350d4d6dad3092d0e84036254347477cea.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 101, + 534, + 517, + 545 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 534, + 517, + 545 + ], + "spans": [ + { + "bbox": [ + 101, + 534, + 517, + 545 + ], + "type": "text", + "content": "Input Text: \"Please segment all the mirror in the image and directly generate the output image.\"" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 119, + 551, + 175, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 551, + 175, + 563 + ], + "spans": [ + { + "bbox": [ + 119, + 551, + 175, + 563 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 237, + 551, + 269, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 551, + 269, + 560 + ], + "spans": [ + { + "bbox": [ + 237, + 551, + 269, + 560 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 323, + 551, + 394, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 551, + 394, + 560 + ], + "spans": [ + { + "bbox": [ + 323, + 551, + 394, + 560 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 453, + 550, + 475, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 453, + 550, + 475, + 559 + ], + "spans": [ + { + "bbox": [ + 453, + 550, + 475, + 559 + ], + "type": "text", + "content": "VMD" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "71" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 70 + }, + { + "para_blocks": [ + { + "bbox": [ + 186, + 111, + 438, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 111, + 438, + 125 + ], + "spans": [ + { + "bbox": [ + 186, + 111, + 438, + 125 + ],
"type": "text", + "content": "Evaluation: Shadow Detection, Grouping and Shape." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 100, + 128, + 201, + 195 + ], + "blocks": [ + { + "bbox": [ + 100, + 128, + 201, + 195 + ], + "lines": [ + { + "bbox": [ + 100, + 128, + 201, + 195 + ], + "spans": [ + { + "bbox": [ + 100, + 128, + 201, + 195 + ], + "type": "image", + "image_path": "88eb6272e2083062c89d2da6799a7fbf0bebd2d74c908f4e8fab3e2024206b52.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 205, + 128, + 306, + 196 + ], + "blocks": [ + { + "bbox": [ + 205, + 128, + 306, + 196 + ], + "lines": [ + { + "bbox": [ + 205, + 128, + 306, + 196 + ], + "spans": [ + { + "bbox": [ + 205, + 128, + 306, + 196 + ], + "type": "image", + "image_path": "f723f0fa1807616a1f9fc4a326c92c0dbc8d41bfa409fc55bcd69f03cf0e4cc4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 128, + 411, + 196 + ], + "blocks": [ + { + "bbox": [ + 309, + 128, + 411, + 196 + ], + "lines": [ + { + "bbox": [ + 309, + 128, + 411, + 196 + ], + "spans": [ + { + "bbox": [ + 309, + 128, + 411, + 196 + ], + "type": "image", + "image_path": "50de3ca29f0a98439974b500ee80d55a3e4acb3f252738f8682accecf3be15e0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 415, + 128, + 516, + 196 + ], + "blocks": [ + { + "bbox": [ + 415, + 128, + 516, + 196 + ], + "lines": [ + { + "bbox": [ + 415, + 128, + 516, + 196 + ], + "spans": [ + { + "bbox": [ + 415, + 128, + 516, + 196 + ], + "type": "image", + "image_path": "ba72c57d88f3c19ba187e2c20d73fd9352c5b8f47d0b13557d72b3f97860362a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 197, + 501, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 197, + 501, + 230 + ], + "spans": [ + { + "bbox": [ + 106, + 197, + 501, + 230 + ], + "type": "text", + "content": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. 
Return the resulting image using image generation.\"" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 100, + 232, + 200, + 363 + ], + "blocks": [ + { + "bbox": [ + 100, + 232, + 200, + 363 + ], + "lines": [ + { + "bbox": [ + 100, + 232, + 200, + 363 + ], + "spans": [ + { + "bbox": [ + 100, + 232, + 200, + 363 + ], + "type": "image", + "image_path": "7968ea927b3c105ef0585b8c69f50258a2601dd8140b619a7c907020835befb5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 205, + 232, + 306, + 364 + ], + "blocks": [ + { + "bbox": [ + 205, + 232, + 306, + 364 + ], + "lines": [ + { + "bbox": [ + 205, + 232, + 306, + 364 + ], + "spans": [ + { + "bbox": [ + 205, + 232, + 306, + 364 + ], + "type": "image", + "image_path": "98fe6ae3af0e25ba30cef5b80dbf80b30b9cf636e59a4dadffc0a63864c71cdd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 309, + 232, + 411, + 364 + ], + "blocks": [ + { + "bbox": [ + 309, + 232, + 411, + 364 + ], + "lines": [ + { + "bbox": [ + 309, + 232, + 411, + 364 + ], + "spans": [ + { + "bbox": [ + 309, + 232, + 411, + 364 + ], + "type": "image", + "image_path": "7665c0eede3baa10374170e0e9e1926291832c424771b68766df8bf95ef3f8f9.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 415, + 232, + 515, + 363 + ], + "blocks": [ + { + "bbox": [ + 415, + 232, + 515, + 363 + ], + "lines": [ + { + "bbox": [ + 415, + 232, + 515, + 363 + ], + "spans": [ + { + "bbox": [ + 415, + 232, + 515, + 363 + ], + "type": "image", + "image_path": "ce1b3984804538be38498c0aaa6fc99ea0991de8d1262b9961e409a6f9e0f3cb.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 365, + 501, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 365, + 501, + 398 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 501, + 398 + ], + "type": "text", + "content": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. Return the resulting image using image generation.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 101, + 399, + 201, + 532 + ], + "blocks": [ + { + "bbox": [ + 101, + 399, + 201, + 532 + ], + "lines": [ + { + "bbox": [ + 101, + 399, + 201, + 532 + ], + "spans": [ + { + "bbox": [ + 101, + 399, + 201, + 532 + ], + "type": "image", + "image_path": "9a09eb88416d90950b00ecbf953c22c1a7c50188a8cd661fe6744dcc97906d76.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 690, + 533, + 735 + ], + "lines": [ + { + "bbox": [ + 77, + 690, + 533, + 735 + ], + "spans": [ + { + "bbox": [ + 77, + 690, + 533, + 735 + ], + "type": "text", + "content": "Figure 54: Task: Image to X: Shadow detection. Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SDDNet [21]. Observation: For more complex examples, both models detect both objects and their shadows with one mask output, as shown in the last two rows, leading to false positive predictions." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 205, + 399, + 306, + 532 + ], + "blocks": [ + { + "bbox": [ + 205, + 399, + 306, + 532 + ], + "lines": [ + { + "bbox": [ + 205, + 399, + 306, + 532 + ], + "spans": [ + { + "bbox": [ + 205, + 399, + 306, + 532 + ], + "type": "image", + "image_path": "df7e8259254f0cce7de2b44908edba93ace97aedc79647c2a02a490d81d15f6d.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 309, + 399, + 411, + 532 + ], + "blocks": [ + { + "bbox": [ + 309, + 399, + 411, + 532 + ], + "lines": [ + { + "bbox": [ + 309, + 399, + 411, + 532 + ], + "spans": [ + { + "bbox": [ + 309, + 399, + 411, + 532 + ], + "type": "image", + "image_path": "1e842cfe1f25dd038d061f3ab7dd7da184ce007a8b89e10fd9297ee4bd1d051c.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 415, + 399, + 515, + 532 + ], + "blocks": [ + { + "bbox": [ + 415, + 399, + 515, + 532 + ], + "lines": [ + { + "bbox": [ + 415, + 399, + 515, + 532 + ], + "spans": [ + { + "bbox": [ + 415, + 399, + 515, + 532 + ], + "type": "image", + "image_path": "1b8d6a02d0d6267d2cb4821bda728d32861a5521511305a19499be24186e0a29.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 533, + 502, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 533, + 502, + 566 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 502, + 566 + ], + "type": "text", + "content": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. 
Return the resulting image using image generation.\"" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 100, + 567, + 201, + 625 + ], + "blocks": [ + { + "bbox": [ + 100, + 567, + 201, + 625 + ], + "lines": [ + { + "bbox": [ + 100, + 567, + 201, + 625 + ], + "spans": [ + { + "bbox": [ + 100, + 567, + 201, + 625 + ], + "type": "image", + "image_path": "abb40d377e6294ce85ec725e10870be0d09e757df45ef1ac17d63772ebe95887.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 205, + 568, + 306, + 625 + ], + "blocks": [ + { + "bbox": [ + 205, + 568, + 306, + 625 + ], + "lines": [ + { + "bbox": [ + 205, + 568, + 306, + 625 + ], + "spans": [ + { + "bbox": [ + 205, + 568, + 306, + 625 + ], + "type": "image", + "image_path": "c5e565189347123ae969a22fa0002406f12393da53fa69379e6fbe9b7fbaddfa.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 309, + 568, + 411, + 625 + ], + "blocks": [ + { + "bbox": [ + 309, + 568, + 411, + 625 + ], + "lines": [ + { + "bbox": [ + 309, + 568, + 411, + 625 + ], + "spans": [ + { + "bbox": [ + 309, + 568, + 411, + 625 + ], + "type": "image", + "image_path": "087970cb94a73330a9143d8515c6bdd8a8061b49291d5ee3caa0aa8911c73064.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 415, + 569, + 515, + 625 + ], + "blocks": [ + { + "bbox": [ + 415, + 569, + 515, + 625 + ], + "lines": [ + { + "bbox": [ + 415, + 569, + 515, + 625 + ], + "spans": [ + { + "bbox": [ + 415, + 569, + 515, + 625 + ], + "type": "image", + "image_path": "55eb18aa82b40bdb5d13022c1ffe68e93f0cf3359c85faa059e9e14d3e5ff8c3.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 627, + 501, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 627, + 501, + 660 + ], + "spans": [ + { + "bbox": [ + 106, + 627, + 501, + 660 + ], + "type": "text", + "content": "Input Text: \"Give me with the segmentation map of the shadow in this image. Set the shadow region to white and the other regions to black. 
Return the resulting image using image generation.\"" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 126, + 662, + 180, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 662, + 180, + 673 + ], + "spans": [ + { + "bbox": [ + 126, + 662, + 180, + 673 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 241, + 662, + 274, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 662, + 274, + 672 + ], + "spans": [ + { + "bbox": [ + 241, + 662, + 274, + 672 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 326, + 662, + 396, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 662, + 396, + 672 + ], + "spans": [ + { + "bbox": [ + 326, + 662, + 396, + 672 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 447, + 662, + 483, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 447, + 662, + 483, + 671 + ], + "spans": [ + { + "bbox": [ + 447, + 662, + 483, + 671 + ], + "type": "text", + "content": "SDDNet" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 85, + 185, + 98 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 85, + 185, + 98 + ], + "spans": [ + { + "bbox": [ + 127, + 85, + 185, + 98 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "72" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 71 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 143, + 160, + 156, + 174 + ], + "blocks": [ + { + "bbox": [ + 143, + 160, + 156, + 174 + ], + "lines": [ + { + "bbox": [ + 143, + 160, + 156, + 174 + ], + "spans": [ + { + "bbox": [ + 143, + 160, + 156, + 174 + ], + "type": "image", + "image_path": "0c35796bfccda9797611a7529116ead987ca3802f68e686dd8c44e01a6055d5c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 158, + 163, + 466, + 176 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 163, + 466, + 176 + ], + "spans": [ + { + "bbox": [ + 158, + 163, + 466, + 176 + ], + "type": "text", + "content": "Evaluation: Camouflage Object Detection, Grouping and Shape." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 95, + 183, + 198, + 258 + ], + "blocks": [ + { + "bbox": [ + 95, + 183, + 198, + 258 + ], + "lines": [ + { + "bbox": [ + 95, + 183, + 198, + 258 + ], + "spans": [ + { + "bbox": [ + 95, + 183, + 198, + 258 + ], + "type": "image", + "image_path": "f58d9105160e737da34562b6c9c09dca8743d388c9ac9c2f0b8894657da4a521.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 260, + 495, + 283 + ], + "lines": [ + { + "bbox": [ + 100, + 260, + 495, + 283 + ], + "spans": [ + { + "bbox": [ + 100, + 260, + 495, + 283 + ], + "type": "text", + "content": "Input Text: \"Give me the segmentation map of the crocodile in this image.
Return resulting image by using image generation.\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 203, + 183, + 304, + 258 + ], + "blocks": [ + { + "bbox": [ + 203, + 183, + 304, + 258 + ], + "lines": [ + { + "bbox": [ + 203, + 183, + 304, + 258 + ], + "spans": [ + { + "bbox": [ + 203, + 183, + 304, + 258 + ], + "type": "image", + "image_path": "f77b58aea411a06d091798fd19812f2d11b864849cf60dea1419dbb96bdeab4b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 309, + 183, + 411, + 258 + ], + "blocks": [ + { + "bbox": [ + 309, + 183, + 411, + 258 + ], + "lines": [ + { + "bbox": [ + 309, + 183, + 411, + 258 + ], + "spans": [ + { + "bbox": [ + 309, + 183, + 411, + 258 + ], + "type": "image", + "image_path": "36ba8480160ce7a11902f9d63c93a779ad81abc66d85c984fe46a402a46397ff.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 417, + 183, + 519, + 258 + ], + "blocks": [ + { + "bbox": [ + 417, + 183, + 519, + 258 + ], + "lines": [ + { + "bbox": [ + 417, + 183, + 519, + 258 + ], + "spans": [ + { + "bbox": [ + 417, + 183, + 519, + 258 + ], + "type": "image", + "image_path": "628f0ed3fe5c2872259b8c53ed502d9c944f34e65615f8483a616fc2cad70310.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 96, + 287, + 197, + 365 + ], + "blocks": [ + { + "bbox": [ + 96, + 287, + 197, + 365 + ], + "lines": [ + { + "bbox": [ + 96, + 287, + 197, + 365 + ], + "spans": [ + { + "bbox": [ + 96, + 287, + 197, + 365 + ], + "type": "image", + "image_path": "b3d11b71a64fbc57f21f8ab46676a1e3eb0408ddfc762cacc7d3fc67d895124c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 369, + 500, + 392 + ], + "lines": [ + { + "bbox": [ + 100, + 369, + 500, + 392 + ], + "spans": [ + { + "bbox": [ + 100, + 369, + 500, + 392 + ], + "type": "text", + "content": "Input Text: \"Give me the segmentation map of the fish in this image. 
Return resulting image by using image generation.\"" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 203, + 287, + 304, + 365 + ], + "blocks": [ + { + "bbox": [ + 203, + 287, + 304, + 365 + ], + "lines": [ + { + "bbox": [ + 203, + 287, + 304, + 365 + ], + "spans": [ + { + "bbox": [ + 203, + 287, + 304, + 365 + ], + "type": "image", + "image_path": "73484faa2e3dc80819041f2a89b22046ef3ff187c2c48e51c3cfc6d540a16621.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 287, + 411, + 365 + ], + "blocks": [ + { + "bbox": [ + 309, + 287, + 411, + 365 + ], + "lines": [ + { + "bbox": [ + 309, + 287, + 411, + 365 + ], + "spans": [ + { + "bbox": [ + 309, + 287, + 411, + 365 + ], + "type": "image", + "image_path": "8a361a39ea1256599aa45e581bc05adce2bd4ba199154e81ac85761e04a5d305.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 417, + 287, + 519, + 365 + ], + "blocks": [ + { + "bbox": [ + 417, + 287, + 519, + 365 + ], + "lines": [ + { + "bbox": [ + 417, + 287, + 519, + 365 + ], + "spans": [ + { + "bbox": [ + 417, + 287, + 519, + 365 + ], + "type": "image", + "image_path": "24ab3a835dd0ae72fb511f6dddb64f64bce8bf595463eb6cff06f05aa41f50a1.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 96, + 394, + 197, + 442 + ], + "blocks": [ + { + "bbox": [ + 96, + 394, + 197, + 442 + ], + "lines": [ + { + "bbox": [ + 96, + 394, + 197, + 442 + ], + "spans": [ + { + "bbox": [ + 96, + 394, + 197, + 442 + ], + "type": "image", + "image_path": "04a1ddeb73159d510035c4040bc6dea5a7b55b6b32df8201b3f02a9a20621b00.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 446, + 500, + 470 + ], + "lines": [ + { + "bbox": [ + 100, + 446, + 500, + 470 + ], + "spans": [ + { + "bbox": [ + 100, + 446, + 500, + 470 + ], + "type": "text", + "content": "Input Text: \"Give me the segmentation map of the fish in this image. Return resulting image by using image generation.\"" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 203, + 394, + 304, + 442 + ], + "blocks": [ + { + "bbox": [ + 203, + 394, + 304, + 442 + ], + "lines": [ + { + "bbox": [ + 203, + 394, + 304, + 442 + ], + "spans": [ + { + "bbox": [ + 203, + 394, + 304, + 442 + ], + "type": "image", + "image_path": "0964f3c1bd27ee97370c3070e8b80b145333e5ecc74f44dff7f9bad2234476a9.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 309, + 394, + 411, + 442 + ], + "blocks": [ + { + "bbox": [ + 309, + 394, + 411, + 442 + ], + "lines": [ + { + "bbox": [ + 309, + 394, + 411, + 442 + ], + "spans": [ + { + "bbox": [ + 309, + 394, + 411, + 442 + ], + "type": "image", + "image_path": "db93f8cdc75167415d8ec0a2d7ba0ce8bac4ffe433a2ff11a79751a9deb5cc51.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 620, + 532, + 665 + ], + "lines": [ + { + "bbox": [ + 77, + 620, + 532, + 665 + ], + "spans": [ + { + "bbox": [ + 77, + 620, + 532, + 665 + ], + "type": "text", + "content": "Figure 55: Task: Image to X: Camouflage object detection. 
Evaluate the grouping and shape analysis ability. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and BiRefNet [127]. Observation: Both GPT-4o and Gemini 2.0 Flash can detect and segment the camouflage animals for simple cases. However, the spatial misalignments still exist." + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 417, + 394, + 519, + 442 + ], + "blocks": [ + { + "bbox": [ + 417, + 394, + 519, + 442 + ], + "lines": [ + { + "bbox": [ + 417, + 394, + 519, + 442 + ], + "spans": [ + { + "bbox": [ + 417, + 394, + 519, + 442 + ], + "type": "image", + "image_path": "cc09b3c0d84509e3bc312aabe02473313c5d4fecd18fd590ee4eaa3bf1153770.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 96, + 472, + 198, + 548 + ], + "blocks": [ + { + "bbox": [ + 96, + 472, + 198, + 548 + ], + "lines": [ + { + "bbox": [ + 96, + 472, + 198, + 548 + ], + "spans": [ + { + "bbox": [ + 96, + 472, + 198, + 548 + ], + "type": "image", + "image_path": "50ee1a066627ca3b776f328349447b722e1083477451cb97c6b6bfb2c2054b04.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 552, + 502, + 576 + ], + "lines": [ + { + "bbox": [ + 100, + 552, + 502, + 576 + ], + "spans": [ + { + "bbox": [ + 100, + 552, + 502, + 576 + ], + "type": "text", + "content": "Input Text: \"Give me the segmentation map of the toad in this image. Return resulting image by using image generation.\"" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 203, + 472, + 306, + 548 + ], + "blocks": [ + { + "bbox": [ + 203, + 472, + 306, + 548 + ], + "lines": [ + { + "bbox": [ + 203, + 472, + 306, + 548 + ], + "spans": [ + { + "bbox": [ + 203, + 472, + 306, + 548 + ], + "type": "image", + "image_path": "72211f5127f3ea905b058ad3d92ebb6f194bea63cc537e32cd09a144926d10d7.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 309, + 472, + 411, + 548 + ], + "blocks": [ + { + "bbox": [ + 309, + 472, + 411, + 548 + ], + "lines": [ + { + "bbox": [ + 309, + 472, + 411, + 548 + ], + "spans": [ + { + "bbox": [ + 309, + 472, + 411, + 548 + ], + "type": "image", + "image_path": "44a15d6fa321c083f59b0c39c8f37dd0512f34e39c60f7e70976d6279609306a.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 417, + 472, + 519, + 548 + ], + "blocks": [ + { + "bbox": [ + 417, + 472, + 519, + 548 + ], + "lines": [ + { + "bbox": [ + 417, + 472, + 519, + 548 + ], + "spans": [ + { + "bbox": [ + 417, + 472, + 519, + 548 + ], + "type": "image", + "image_path": "ca7cc128d79c704808bc1637b90a08b777ee9b0baab8865ec66e337610c7be1e.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 120, + 582, + 176, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 582, + 176, + 594 + ], + "spans": [ + { + "bbox": [ + 120, + 582, + 176, + 594 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 238, + 582, + 271, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 582, + 271, + 592 + ], + "spans": [ + { + "bbox": [ 
+ 238, + 582, + 271, + 592 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 325, + 582, + 397, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 582, + 397, + 592 + ], + "spans": [ + { + "bbox": [ + 325, + 582, + 397, + 592 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 446, + 582, + 487, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 446, + 582, + 487, + 592 + ], + "spans": [ + { + "bbox": [ + 446, + 582, + 487, + 592 + ], + "type": "text", + "content": "BiRefNet" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 123, + 135, + 182, + 148 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 135, + 182, + 148 + ], + "spans": [ + { + "bbox": [ + 123, + 135, + 182, + 148 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "73" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 72 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 187, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 187, + 85 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 187, + 85 + ], + "type": "text", + "content": "2.4.4 Depth Estimation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 534, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 534, + 257 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 534, + 257 + ], + "type": "text", + "content": "The depth estimation task involves predicting the distance from the camera to objects within a scene. In this paper, we focus on monocular depth estimation, which takes a single image as input. In Figure 56, we compare GPT-4o, Gemini 2.0 Flash, and a recent SOTA method, Depth-Anything [114]. We first notice that Gemini cannot produce reasonable depth estimations. For GPT-4o, although it can output a fancy depth map visualization, we want to point out that this output is a grayscale visualization of depth estimation and cannot be directly converted to the depth of each pixel. We mainly show five cases. In the first test case, we notice that GPT-4o is good at capturing details in images, which Depth-Anything may not be good at. Although we cannot directly determine the accuracy of the depth values, we can judge from the visualization that the depth relationship between objects is accurate. What GPT-4o cannot do well is the background. Since the background in the image is the sky, we can infer from common sense that these areas are infinitely far away from the camera. However, the depth map output of GPT-4o does not handle these areas correctly. GPT-4o performs similarly in the second, fourth, and fifth examples. Among them, we would like to emphasize the fourth test case, since for buildings farther away, GPT-4o cannot effectively analyze the distance between each building and the camera. In the third example, the output of GPT-4o is very confusing: it completely misunderstands the depth relationship of the entire image. Therefore, we believe that the depth estimation performance of GPT-4o is still unstable."
+ } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "74" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 73 + }, + { + "para_blocks": [ + { + "bbox": [ + 113, + 118, + 173, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 118, + 173, + 131 + ], + "spans": [ + { + "bbox": [ + 113, + 118, + 173, + 131 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 213, + 139, + 227, + 154 + ], + "blocks": [ + { + "bbox": [ + 213, + 139, + 227, + 154 + ], + "lines": [ + { + "bbox": [ + 213, + 139, + 227, + 154 + ], + "spans": [ + { + "bbox": [ + 213, + 139, + 227, + 154 + ], + "type": "image", + "image_path": "28038c53ecd04cd0c7f8a9e7042077716a5c924fd1bcd4bdb4dc296a5e381da1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 228, + 142, + 384, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 142, + 384, + 156 + ], + "spans": [ + { + "bbox": [ + 228, + 142, + 384, + 156 + ], + "type": "text", + "content": "Evaluation: Depth Estimation" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 86, + 161, + 192, + 232 + ], + "blocks": [ + { + "bbox": [ + 86, + 161, + 192, + 232 + ], + "lines": [ + { + "bbox": [ + 86, + 161, + 192, + 232 + ], + "spans": [ + { + "bbox": [ + 86, + 161, + 192, + 232 + ], + "type": "image", + "image_path": "c0365698147e513e49bef371ac74ee5fb93c63b6c1496ce9ac4635ce65cd901e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 197, + 161, + 302, + 232 + ], + "blocks": [ + { + "bbox": [ + 197, + 161, + 302, + 232 + ], + "lines": [ + { + "bbox": [ + 197, + 161, + 302, + 232 + ], + "spans": [ + { + "bbox": [ + 197, + 161, + 302, + 232 + ], + "type": "image", + "image_path": "607880b57a1f83f7d1721514860baaaa5af0252138afb508b72a65611fc2b893.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 308, + 161, + 415, + 232 + ], + "blocks": [ + { + "bbox": [ + 308, + 161, + 415, + 232 + ], + "lines": [ + { + "bbox": [ + 308, + 161, + 415, + 232 + ], + "spans": [ + { + "bbox": [ + 308, + 161, + 415, + 232 + ], + "type": "image", + "image_path": "5d70486874bdca6ec3533ff018df819168b3864b389e7def95b35ea1d5fd3941.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 418, + 161, + 524, + 232 + ], + "blocks": [ + { + "bbox": [ + 418, + 161, + 524, + 232 + ], + "lines": [ + { + "bbox": [ + 418, + 161, + 524, + 232 + ], + "spans": [ + { + "bbox": [ + 418, + 161, + 524, + 232 + ], + "type": "image", + "image_path": "1d5ece3f0e15c3834d92639316498ad868ae535068088c79e13b0ab393f5994b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 97, + 232, + 424, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 232, + 424, + 246 + ], + "spans": [ + { + "bbox": [ + 97, + 232, + 424, + 246 + ], + "type": "text", + "content": "Input Text: \"Please generate the depth map prediction of 
this image.\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 86, + 250, + 192, + 321 + ], + "blocks": [ + { + "bbox": [ + 86, + 250, + 192, + 321 + ], + "lines": [ + { + "bbox": [ + 86, + 250, + 192, + 321 + ], + "spans": [ + { + "bbox": [ + 86, + 250, + 192, + 321 + ], + "type": "image", + "image_path": "96954093e4eb35d0ca710e8119a031cdc99bc1453c9ec711f72072006f7ffbbb.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 197, + 250, + 301, + 320 + ], + "blocks": [ + { + "bbox": [ + 197, + 250, + 301, + 320 + ], + "lines": [ + { + "bbox": [ + 197, + 250, + 301, + 320 + ], + "spans": [ + { + "bbox": [ + 197, + 250, + 301, + 320 + ], + "type": "image", + "image_path": "c13099707bf9fe0d5399ee37e5da600f88c1036e16f352a6e9ded8e89a045d2e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 308, + 250, + 414, + 320 + ], + "blocks": [ + { + "bbox": [ + 308, + 250, + 414, + 320 + ], + "lines": [ + { + "bbox": [ + 308, + 250, + 414, + 320 + ], + "spans": [ + { + "bbox": [ + 308, + 250, + 414, + 320 + ], + "type": "image", + "image_path": "4b28cc6dcc84678e3173e8fa682a9dc064ea0f9777f5f93daeba4307f5cb80ec.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 419, + 250, + 523, + 321 + ], + "blocks": [ + { + "bbox": [ + 419, + 250, + 523, + 321 + ], + "lines": [ + { + "bbox": [ + 419, + 250, + 523, + 321 + ], + "spans": [ + { + "bbox": [ + 419, + 250, + 523, + 321 + ], + "type": "image", + "image_path": "ab5d35fd29583376a884277394b92fa4599215c40d88cdcc94c6b9c9bd93f1ae.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 97, + 321, + 424, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 321, + 424, + 334 + ], + "spans": [ + { + "bbox": [ + 97, + 321, + 424, + 334 + ], + "type": "text", + "content": "Input Text: \"Please generate the depth map prediction of this image.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 86, + 337, + 192, + 407 + ], + "blocks": [ + { + "bbox": [ + 86, + 337, + 192, + 407 + ], + "lines": [ + { + "bbox": [ + 86, + 337, + 192, + 407 + ], + "spans": [ + { + "bbox": [ + 86, + 337, + 192, + 407 + ], + "type": "image", + "image_path": "e822d81326618b0d941cb4c3271bef0fd0548efa0ef06244f8b36e4b001ccf7a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 197, + 338, + 302, + 407 + ], + "blocks": [ + { + "bbox": [ + 197, + 338, + 302, + 407 + ], + "lines": [ + { + "bbox": [ + 197, + 338, + 302, + 407 + ], + "spans": [ + { + "bbox": [ + 197, + 338, + 302, + 407 + ], + "type": "image", + "image_path": "76849dc2c4d82c6863264e63ab6e8135e1109cad0f3635b978fc71a1be56563d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 308, + 338, + 414, + 407 + ], + "blocks": [ + { + "bbox": [ + 308, + 338, + 414, + 407 + ], + "lines": [ + { + "bbox": [ + 308, + 338, + 414, + 407 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 414, + 407 + ], + "type": "image", + "image_path": "c1871f48314d2af2fd2dfd3ce2bf7c8ce6dd5d5432b0c3771e9206b1cb6dbbef.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + 
}, + { + "type": "image", + "bbox": [ + 419, + 338, + 524, + 407 + ], + "blocks": [ + { + "bbox": [ + 419, + 338, + 524, + 407 + ], + "lines": [ + { + "bbox": [ + 419, + 338, + 524, + 407 + ], + "spans": [ + { + "bbox": [ + 419, + 338, + 524, + 407 + ], + "type": "image", + "image_path": "40f481609f3f72c80c1bb3a1911a2bcc98e71fe948326192df5cb8ed5f01b0a6.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 97, + 410, + 424, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 410, + 424, + 423 + ], + "spans": [ + { + "bbox": [ + 97, + 410, + 424, + 423 + ], + "type": "text", + "content": "Input Text: \"Please generate the depth map prediction of this image.\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 86, + 426, + 192, + 493 + ], + "blocks": [ + { + "bbox": [ + 86, + 426, + 192, + 493 + ], + "lines": [ + { + "bbox": [ + 86, + 426, + 192, + 493 + ], + "spans": [ + { + "bbox": [ + 86, + 426, + 192, + 493 + ], + "type": "image", + "image_path": "091b9e25759363e18a4a8027407ebff16dae2c1d7196bfd2671970b2dfdf18dc.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 626, + 533, + 682 + ], + "lines": [ + { + "bbox": [ + 77, + 626, + 533, + 682 + ], + "spans": [ + { + "bbox": [ + 77, + 626, + 533, + 682 + ], + "type": "text", + "content": "Figure 56: Task: Image to X: Depth estimation. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Depth-Anything [114]. Observation: We convert the depth map generated by Depth-Anything into a visualization map similar to GPT-4o. This evaluation shows that GPT-4o has the capability of distinguishing the depth relationship of different parts in the image, but its understanding of the background is insufficient." 
+ } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 197, + 426, + 302, + 493 + ], + "blocks": [ + { + "bbox": [ + 197, + 426, + 302, + 493 + ], + "lines": [ + { + "bbox": [ + 197, + 426, + 302, + 493 + ], + "spans": [ + { + "bbox": [ + 197, + 426, + 302, + 493 + ], + "type": "image", + "image_path": "eb86efcba3a9c240da3e722bda20727de1f615bfe422fab64ff3b091cd85df06.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 308, + 426, + 414, + 492 + ], + "blocks": [ + { + "bbox": [ + 308, + 426, + 414, + 492 + ], + "lines": [ + { + "bbox": [ + 308, + 426, + 414, + 492 + ], + "spans": [ + { + "bbox": [ + 308, + 426, + 414, + 492 + ], + "type": "image", + "image_path": "042e2b1b154968fc9607cbadf6e30a37999a81fe9a06bc6f75a82aeaafc4d979.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 419, + 426, + 524, + 493 + ], + "blocks": [ + { + "bbox": [ + 419, + 426, + 524, + 493 + ], + "lines": [ + { + "bbox": [ + 419, + 426, + 524, + 493 + ], + "spans": [ + { + "bbox": [ + 419, + 426, + 524, + 493 + ], + "type": "image", + "image_path": "d0fbcde1d1f0ba76a9c49ab60586f0a6bd39c2d0d6c38d24f1164c0b482dacaa.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 99, + 494, + 425, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 494, + 425, + 506 + ], + "spans": [ + { + "bbox": [ + 99, + 494, + 425, + 506 + ], + "type": "text", + "content": "Input Text: \"Please generate the depth map prediction of this image.\"" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 86, + 510, + 192, + 580 + ], + "blocks": [ + { + "bbox": [ + 86, + 510, + 192, + 580 + ], + "lines": [ + { + "bbox": [ + 86, + 510, + 192, + 580 + ], + "spans": [ + { + "bbox": [ + 86, + 510, + 192, + 580 + ], + "type": "image", + "image_path": "7b4abe90aad35a4b1cd796581e661c9ef27e071aff9b1800f3b7341066fed935.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 197, + 510, + 302, + 580 + ], + "blocks": [ + { + "bbox": [ + 197, + 510, + 302, + 580 + ], + "lines": [ + { + "bbox": [ + 197, + 510, + 302, + 580 + ], + "spans": [ + { + "bbox": [ + 197, + 510, + 302, + 580 + ], + "type": "image", + "image_path": "2f8dd6355dcb71362e42158e49ed0aac5ddd056ed4d4516867721782b9b2a0b2.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 308, + 510, + 414, + 580 + ], + "blocks": [ + { + "bbox": [ + 308, + 510, + 414, + 580 + ], + "lines": [ + { + "bbox": [ + 308, + 510, + 414, + 580 + ], + "spans": [ + { + "bbox": [ + 308, + 510, + 414, + 580 + ], + "type": "image", + "image_path": "07fe029890bbeded3efc202fc24a6394fb11dda52778c7ad9cd6a134088f6ae9.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 419, + 510, + 523, + 580 + ], + "blocks": [ + { + "bbox": [ + 419, + 510, + 523, + 580 + ], + "lines": [ + { + "bbox": [ + 419, + 510, + 523, + 580 + ], + "spans": [ + { + "bbox": [ + 419, + 510, + 523, + 580 + ], + "type": "image", + "image_path": "cce42e0f3762b76b5639e2686fa7e95138a6dafc58db634f3dc3c0d47aebd717.jpg" + } + ] + } + ], + "index": 26, + 
"angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "bbox": [ + 99, + 581, + 427, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 581, + 427, + 594 + ], + "spans": [ + { + "bbox": [ + 99, + 581, + 427, + 594 + ], + "type": "text", + "content": "Input Text: \"Please generate the depth map prediction of this image.\"" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 111, + 600, + 171, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 600, + 171, + 613 + ], + "spans": [ + { + "bbox": [ + 111, + 600, + 171, + 613 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 233, + 602, + 269, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 602, + 269, + 613 + ], + "spans": [ + { + "bbox": [ + 233, + 602, + 269, + 613 + ], + "type": "text", + "content": "GPT-40" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 321, + 602, + 399, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 602, + 399, + 614 + ], + "spans": [ + { + "bbox": [ + 321, + 602, + 399, + 614 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 433, + 602, + 510, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 433, + 602, + 510, + 615 + ], + "spans": [ + { + "bbox": [ + 433, + 602, + 510, + 615 + ], + "type": "text", + "content": "Depth-Anything" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "75" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 74 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 194, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 194, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 194, + 83 + ], + "type": "text", + "content": "2.4.5 Normal Estimation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 533, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 533, + 180 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 533, + 180 + ], + "type": "text", + "content": "The surface normal estimation task involves predicting the orientation of surfaces at each pixel in an image, typically represented as 3D vectors. In Figure 57, we compare GPT-4o, Gemini 2.0 Flash, and Marigold normals [48]. The results show that GPT-4o can generate reasonable results. However, since GPT-4o's output is an appealing normal map visualization, we want to clarify that this output is a color-coded visualization and does not directly provide the exact normal vector for each pixel. Thus, we cannot use lighting or other methods to verify the accuracy of the normal maps, and downstream tasks cannot use the output results. However, we also find some unreasonable details. In the third test case, common sense suggests that the ground should be flat, but GPT-4o predicts normals for these textured areas that differ from the surrounding areas." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 131, + 200, + 190, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 200, + 190, + 213 + ], + "spans": [ + { + "bbox": [ + 131, + 200, + 190, + 213 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 230, + 224, + 397, + 237 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 224, + 397, + 237 + ], + "spans": [ + { + "bbox": [ + 230, + 224, + 397, + 237 + ], + "type": "text", + "content": "Evaluation: Consistency/accuracy." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 89, + 239, + 187, + 346 + ], + "blocks": [ + { + "bbox": [ + 89, + 239, + 187, + 346 + ], + "lines": [ + { + "bbox": [ + 89, + 239, + 187, + 346 + ], + "spans": [ + { + "bbox": [ + 89, + 239, + 187, + 346 + ], + "type": "image", + "image_path": "652932d800457a58f1281b3ba28fa3df0773930a60325d39e942f44b98045c8e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 89, + 347, + 188, + 483 + ], + "blocks": [ + { + "bbox": [ + 89, + 347, + 188, + 483 + ], + "lines": [ + { + "bbox": [ + 89, + 347, + 188, + 483 + ], + "spans": [ + { + "bbox": [ + 89, + 347, + 188, + 483 + ], + "type": "image", + "image_path": "e099a5ea018e11845a670b91da9b9e084bcfb35931d3f4ead8ac754370ad78b3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 91, + 485, + 187, + 602 + ], + "blocks": [ + { + "bbox": [ + 91, + 485, + 187, + 602 + ], + "lines": [ + { + "bbox": [ + 91, + 485, + 187, + 602 + ], + "spans": [ + { + "bbox": [ + 91, + 485, + 187, + 602 + ], + "type": "image", + "image_path": "946a58c00f3ff3a1895e1b765823d31d38f03bbcd704ed7183effd358ebae58a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 163, + 607, + 441, + 619 + ], + "lines": [ + { + "bbox": [ + 163, + 607, + 441, + 619 + ], + "spans": [ + { + "bbox": [ + 163, + 607, + 441, + 619 + ], + "type": "text", + "content": "Input Text: \"Generate the surface normal map of this picture.\"" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 198, + 239, + 294, + 346 + ], + "blocks": [ + { + "bbox": [ + 198, + 239, + 294, + 346 + ], + "lines": [ + { + "bbox": [ + 198, + 239, + 294, + 346 + ], + "spans": [ + { + "bbox": [ + 198, + 239, + 294, + 346 + ], + "type": "image", + "image_path": "583b65ff347f460c795a71a3cedff21e9e48ec2217a26a74271927b6aa96f5d5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 198, + 347, + 294, + 483 + ], + "blocks": [ + { + "bbox": [ + 198, + 347, + 294, + 483 + ], + "lines": [ + { + "bbox": [ + 198, + 347, + 294, + 483 + ], + "spans": [ + { + "bbox": [ + 198, + 347, + 294, + 483 + ], + "type": "image", + "image_path": "e4c0179dfb831279ce07771cfe8cbac48857d6e750c966acbf75a9fd4eaf0a65.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 199, + 485, + 294, + 603 + ], + "blocks": [ + { + "bbox": [ + 199, + 485, + 294, + 603 + ], + "lines": [ + { + "bbox": [ + 199, + 485, + 294, + 603 + ], + "spans": [ + { + "bbox": [ + 199, + 485, + 294, + 603 + ], + "type": "image", + "image_path": 
"396cc4efe9e599f8f4c4b1bad0da4de3244d5eeed20ffeb09471237218d5adde.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 661, + 533, + 706 + ], + "lines": [ + { + "bbox": [ + 77, + 661, + 533, + 706 + ], + "spans": [ + { + "bbox": [ + 77, + 661, + 533, + 706 + ], + "type": "text", + "content": "Figure 57: Task: Image to X: Normal estimation. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and Marigold [48]. Observation: This evaluation shows that GPT-4o has the capability of generating a visualization map of the surface normal, but the understanding of the details is still insufficient." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 306, + 239, + 402, + 346 + ], + "blocks": [ + { + "bbox": [ + 306, + 239, + 402, + 346 + ], + "lines": [ + { + "bbox": [ + 306, + 239, + 402, + 346 + ], + "spans": [ + { + "bbox": [ + 306, + 239, + 402, + 346 + ], + "type": "image", + "image_path": "9d509d18edc90347ae9f6dcacd7565656a476da05b9ef46fdf5918a71a428c43.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 348, + 402, + 483 + ], + "blocks": [ + { + "bbox": [ + 306, + 348, + 402, + 483 + ], + "lines": [ + { + "bbox": [ + 306, + 348, + 402, + 483 + ], + "spans": [ + { + "bbox": [ + 306, + 348, + 402, + 483 + ], + "type": "image", + "image_path": "d2aadb5f85512222f6151124f2d78a026acca8ece755c80d998f65be5f2e8ae8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 306, + 485, + 402, + 603 + ], + "blocks": [ + { + "bbox": [ + 306, + 485, + 402, + 603 + ], + "lines": [ + { + "bbox": [ + 306, + 485, + 402, + 603 + ], + "spans": [ + { + "bbox": [ + 306, + 485, + 402, + 603 + ], + "type": "image", + "image_path": "5d604baed8d657358ea3a3aa09a0796cbab115ba03ae7dc21bef555277a6b92c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 414, + 238, + 512, + 346 + ], + "blocks": [ + { + "bbox": [ + 414, + 238, + 512, + 346 + ], + "lines": [ + { + "bbox": [ + 414, + 238, + 512, + 346 + ], + "spans": [ + { + "bbox": [ + 414, + 238, + 512, + 346 + ], + "type": "image", + "image_path": "e324eb93d8c4dfceef9d783883e1f613acfba48879633c0d04fd8039c2f4ba20.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 415, + 348, + 512, + 483 + ], + "blocks": [ + { + "bbox": [ + 415, + 348, + 512, + 483 + ], + "lines": [ + { + "bbox": [ + 415, + 348, + 512, + 483 + ], + "spans": [ + { + "bbox": [ + 415, + 348, + 512, + 483 + ], + "type": "image", + "image_path": "c748048db9bc4e6543e0723fd2782186695eaa29379aa62ac724941790f330f9.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 415, + 485, + 512, + 604 + ], + "blocks": [ + { + "bbox": [ + 415, + 485, + 512, + 604 + ], + "lines": [ + { + "bbox": [ + 415, + 485, + 512, + 604 + ], + "spans": [ + { + "bbox": [ + 415, + 485, + 512, + 604 + ], + "type": "image", + "image_path": "69d386fd0754ccef0b8aa405ddd5368febd5edd73da8504508220e1ee0fb3ce3.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 112, + 628, + 
169, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 628, + 169, + 640 + ], + "spans": [ + { + "bbox": [ + 112, + 628, + 169, + 640 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 223, + 628, + 257, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 628, + 257, + 639 + ], + "spans": [ + { + "bbox": [ + 223, + 628, + 257, + 639 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 628, + 383, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 628, + 383, + 639 + ], + "spans": [ + { + "bbox": [ + 310, + 628, + 383, + 639 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 445, + 628, + 485, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 445, + 628, + 485, + 640 + ], + "spans": [ + { + "bbox": [ + 445, + 628, + 485, + 640 + ], + "type": "text", + "content": "Marigold" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "76" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 75 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 185, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 185, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 185, + 83 + ], + "type": "text", + "content": "2.4.6 Layout Detection" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 533, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 533, + 157 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 533, + 157 + ], + "type": "text", + "content": "The layout detection task requires the model to identify structural components (e.g., titles, paragraphs, tables, images) in the given image. In Figure 58, we compare the performance of GPT-4o, Gemini 2.0 Flash, and LayoutLMV3 [44] on the layout detection task. In the test cases, GPT-4o hallucinates layout elements that do not exist, and its final output is a different document overlaid with \"layout detection\" results. For use in downstream tasks, such results are meaningless. Therefore, we conclude that GPT-4o is not capable of the layout detection task."
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 117, + 175, + 177, + 187 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 175, + 177, + 187 + ], + "spans": [ + { + "bbox": [ + 117, + 175, + 177, + 187 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 216, + 196, + 228, + 209 + ], + "blocks": [ + { + "bbox": [ + 216, + 196, + 228, + 209 + ], + "lines": [ + { + "bbox": [ + 216, + 196, + 228, + 209 + ], + "spans": [ + { + "bbox": [ + 216, + 196, + 228, + 209 + ], + "type": "image", + "image_path": "4ef72f42ea0ddf4ee1ec0367601e96f40b0391ec4c9fa27f967f8b50716f6305.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 230, + 199, + 394, + 211 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 199, + 394, + 211 + ], + "spans": [ + { + "bbox": [ + 230, + 199, + 394, + 211 + ], + "type": "text", + "content": "Evaluation: Document Detection." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 90, + 219, + 194, + 327 + ], + "blocks": [ + { + "bbox": [ + 90, + 219, + 194, + 327 + ], + "lines": [ + { + "bbox": [ + 90, + 219, + 194, + 327 + ], + "spans": [ + { + "bbox": [ + 90, + 219, + 194, + 327 + ], + "type": "image", + "image_path": "2e8258faee6378598f378a9a63923d7d64ed24c4a8d936ba6407f7a1752ce76a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 471, + 533, + 505 + ], + "lines": [ + { + "bbox": [ + 77, + 471, + 533, + 505 + ], + "spans": [ + { + "bbox": [ + 77, + 471, + 533, + 505 + ], + "type": "text", + "content": "Figure 58: Task: Image to X: Layout detection. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and LayoutLMV3 [44]. Observation: The results show that GPT-4o and Gemini frequently generate a different document, even though the detected layout appears correct."
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 204, + 219, + 284, + 327 + ], + "blocks": [ + { + "bbox": [ + 204, + 219, + 284, + 327 + ], + "lines": [ + { + "bbox": [ + 204, + 219, + 284, + 327 + ], + "spans": [ + { + "bbox": [ + 204, + 219, + 284, + 327 + ], + "type": "image", + "image_path": "235e18e587aa841842c1dc8ffd53a330c1345d64b6c3875f51db1f215dc6fc92.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 293, + 219, + 392, + 308 + ], + "blocks": [ + { + "bbox": [ + 293, + 219, + 392, + 308 + ], + "lines": [ + { + "bbox": [ + 293, + 219, + 392, + 308 + ], + "spans": [ + { + "bbox": [ + 293, + 219, + 392, + 308 + ], + "type": "image", + "image_path": "37e0cb1ddf2d5338758f440584a48a5f50fe2d534e9b2486a05a7690ed334ed9.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 400, + 219, + 518, + 308 + ], + "blocks": [ + { + "bbox": [ + 400, + 219, + 518, + 308 + ], + "lines": [ + { + "bbox": [ + 400, + 219, + 518, + 308 + ], + "spans": [ + { + "bbox": [ + 400, + 219, + 518, + 308 + ], + "type": "image", + "image_path": "473d5c4fa40852da1be956bd91fc8dbb01033d977f73e000514033c57935eebb.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 91, + 331, + 525, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 331, + 525, + 344 + ], + "spans": [ + { + "bbox": [ + 91, + 331, + 525, + 344 + ], + "type": "text", + "content": "Input Text: \"Generate a new image which contains the layout detection results of the input image.\"" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 91, + 357, + 191, + 415 + ], + "blocks": [ + { + "bbox": [ + 91, + 357, + 191, + 415 + ], + "lines": [ + { + "bbox": [ + 91, + 357, + 191, + 415 + ], + "spans": [ + { + "bbox": [ + 91, + 357, + 191, + 415 + ], + "type": "image", + "image_path": "36374494cf58c80bf10d518e18df6bb7c26dfed3007927ce329edde282b717d7.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 194, + 356, + 294, + 418 + ], + "blocks": [ + { + "bbox": [ + 194, + 356, + 294, + 418 + ], + "lines": [ + { + "bbox": [ + 194, + 356, + 294, + 418 + ], + "spans": [ + { + "bbox": [ + 194, + 356, + 294, + 418 + ], + "type": "image", + "image_path": "e80a0e37e52eb35595a7df3a6aa08948e175cfb951ba4bd65e8b98ab1887d928.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 296, + 356, + 406, + 419 + ], + "blocks": [ + { + "bbox": [ + 296, + 356, + 406, + 419 + ], + "lines": [ + { + "bbox": [ + 296, + 356, + 406, + 419 + ], + "spans": [ + { + "bbox": [ + 296, + 356, + 406, + 419 + ], + "type": "image", + "image_path": "b2bbda9be8e2840ce6f8b02ad35c98e1401b5728eb36e05879ea84bdd7c2ff9f.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 410, + 356, + 522, + 418 + ], + "blocks": [ + { + "bbox": [ + 410, + 356, + 522, + 418 + ], + "lines": [ + { + "bbox": [ + 410, + 356, + 522, + 418 + ], + "spans": [ + { + "bbox": [ + 410, + 356, + 522, + 418 + ], + "type": "image", + "image_path": "d7357528516c7245255631ca4698524569152aafd5239b1a9cd9dfa6ee24c212.jpg" + } + ] + } + ], 
+ "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 91, + 422, + 524, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 422, + 524, + 435 + ], + "spans": [ + { + "bbox": [ + 91, + 422, + 524, + 435 + ], + "type": "text", + "content": "Input Text: \"Generate a new image which contains the layout detection results of the input image.\"" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 110, + 442, + 166, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 442, + 166, + 454 + ], + "spans": [ + { + "bbox": [ + 110, + 442, + 166, + 454 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 230, + 442, + 262, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 442, + 262, + 452 + ], + "spans": [ + { + "bbox": [ + 230, + 442, + 262, + 452 + ], + "type": "text", + "content": "GPT 40" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 442, + 389, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 442, + 389, + 452 + ], + "spans": [ + { + "bbox": [ + 316, + 442, + 389, + 452 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 436, + 442, + 492, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 436, + 442, + 492, + 453 + ], + "spans": [ + { + "bbox": [ + 436, + 442, + 492, + 453 + ], + "type": "text", + "content": "LayoutLMV3" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "77" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 76 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 173, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 173, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 173, + 83 + ], + "type": "text", + "content": "2.4.7 Text Detection" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 91, + 532, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 91, + 532, + 125 + ], + "spans": [ + { + "bbox": [ + 77, + 91, + 532, + 125 + ], + "type": "text", + "content": "The text detection task requires the model to detect the texts in the given image. In Figure 59, we compare the performance of GPT-4o, Gemini 2.0 Flash [99], and CRAFT [3] regarding to text detection. We observe that CRAFT exhibits better performance compared to the other models." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 129, + 532, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 129, + 532, + 185 + ], + "spans": [ + { + "bbox": [ + 77, + 129, + 532, + 185 + ], + "type": "text", + "content": "In the first test case, GPT-4o demonstrates comparable performance to CRAFT. However, in other cases, GPT-4o continuously generates some nonexistent texts and labels them as \"text area\". This issue becomes particularly evident in cluttered scenes or images with complex backgrounds. These false positives not only reduce detection precision but also make the output less reliable for downstream tasks such as OCR or document understanding. 
On the other hand, Gemini does not generate nonexistent texts but tends to over-predict some areas as text areas." + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "78" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 77 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 108, + 179, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 108, + 179, + 122 + ], + "spans": [ + { + "bbox": [ + 120, + 108, + 179, + 122 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 233, + 128, + 245, + 141 + ], + "blocks": [ + { + "bbox": [ + 233, + 128, + 245, + 141 + ], + "lines": [ + { + "bbox": [ + 233, + 128, + 245, + 141 + ], + "spans": [ + { + "bbox": [ + 233, + 128, + 245, + 141 + ], + "type": "image", + "image_path": "80922918a65d3be2dc64726ee8e795ae73b3e97479ae0a61fb7cbf01bf4cb9c1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 247, + 131, + 379, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 131, + 379, + 144 + ], + "spans": [ + { + "bbox": [ + 247, + 131, + 379, + 144 + ], + "type": "text", + "content": "Evaluation: Text Detection." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 93, + 150, + 196, + 230 + ], + "blocks": [ + { + "bbox": [ + 93, + 150, + 196, + 230 + ], + "lines": [ + { + "bbox": [ + 93, + 150, + 196, + 230 + ], + "spans": [ + { + "bbox": [ + 93, + 150, + 196, + 230 + ], + "type": "image", + "image_path": "8d12c4277ccc433463d916e0d4c021703de904c087034505189b0c4f6bee4dc4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 197, + 151, + 312, + 230 + ], + "blocks": [ + { + "bbox": [ + 197, + 151, + 312, + 230 + ], + "lines": [ + { + "bbox": [ + 197, + 151, + 312, + 230 + ], + "spans": [ + { + "bbox": [ + 197, + 151, + 312, + 230 + ], + "type": "image", + "image_path": "d79345dd614218dc4a3fbadc34f3efbc6475c52df944a483358394a2ae83a4e5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 313, + 152, + 416, + 230 + ], + "blocks": [ + { + "bbox": [ + 313, + 152, + 416, + 230 + ], + "lines": [ + { + "bbox": [ + 313, + 152, + 416, + 230 + ], + "spans": [ + { + "bbox": [ + 313, + 152, + 416, + 230 + ], + "type": "image", + "image_path": "3b08ebeb2ba10b1056a1d7752ce27e2b4990f6b98a2f7acdbabcd32480037452.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 417, + 152, + 519, + 229 + ], + "blocks": [ + { + "bbox": [ + 417, + 152, + 519, + 229 + ], + "lines": [ + { + "bbox": [ + 417, + 152, + 519, + 229 + ], + "spans": [ + { + "bbox": [ + 417, + 152, + 519, + 229 + ], + "type": "image", + "image_path": "0a75ab15149c9afdb4641c46e30bd50461202b7c88fa785bef866a2691d27f84.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 97, + 236, + 503, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 236, + 503, + 250 + ], + "spans": [ + { + "bbox": [ + 97, + 
236, + 503, + 250 + ], + "type": "text", + "content": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 94, + 263, + 202, + 325 + ], + "blocks": [ + { + "bbox": [ + 94, + 263, + 202, + 325 + ], + "lines": [ + { + "bbox": [ + 94, + 263, + 202, + 325 + ], + "spans": [ + { + "bbox": [ + 94, + 263, + 202, + 325 + ], + "type": "image", + "image_path": "12fe1a849b28c956a4b37361bf45581ff6000a0b09f3ab5787b4647bbd9a3831.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 205, + 263, + 298, + 325 + ], + "blocks": [ + { + "bbox": [ + 205, + 263, + 298, + 325 + ], + "lines": [ + { + "bbox": [ + 205, + 263, + 298, + 325 + ], + "spans": [ + { + "bbox": [ + 205, + 263, + 298, + 325 + ], + "type": "image", + "image_path": "2ea1368e7787c97c60d6731352894c9323ed501fb6dc4e2d3cb1aa2c697b91f0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 298, + 263, + 406, + 325 + ], + "blocks": [ + { + "bbox": [ + 298, + 263, + 406, + 325 + ], + "lines": [ + { + "bbox": [ + 298, + 263, + 406, + 325 + ], + "spans": [ + { + "bbox": [ + 298, + 263, + 406, + 325 + ], + "type": "image", + "image_path": "5fd98c00acb24bb685d7ae786468c021486683a40b42e8431c28d0d29da3cf89.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 408, + 264, + 517, + 326 + ], + "blocks": [ + { + "bbox": [ + 408, + 264, + 517, + 326 + ], + "lines": [ + { + "bbox": [ + 408, + 264, + 517, + 326 + ], + "spans": [ + { + "bbox": [ + 408, + 264, + 517, + 326 + ], + "type": "image", + "image_path": "05076e4a3056272c3ce291b647bd2b2098f8bef74c7807b41ab1d48af70b1f0e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 97, + 328, + 503, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 328, + 503, + 341 + ], + "spans": [ + { + "bbox": [ + 97, + 328, + 503, + 341 + ], + "type": "text", + "content": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 94, + 347, + 200, + 487 + ], + "blocks": [ + { + "bbox": [ + 94, + 347, + 200, + 487 + ], + "lines": [ + { + "bbox": [ + 94, + 347, + 200, + 487 + ], + "spans": [ + { + "bbox": [ + 94, + 347, + 200, + 487 + ], + "type": "image", + "image_path": "f2a66d298cf6681e639f60487d91eb2376e9527b2385d109001392557fd3fc3c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 201, + 347, + 308, + 487 + ], + "blocks": [ + { + "bbox": [ + 201, + 347, + 308, + 487 + ], + "lines": [ + { + "bbox": [ + 201, + 347, + 308, + 487 + ], + "spans": [ + { + "bbox": [ + 201, + 347, + 308, + 487 + ], + "type": "image", + "image_path": "0ceb9d3c24c10622601c5cd8f2bfe1590592e3f441bd27e97f56e47c7c523543.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 309, + 347, + 413, + 487 + ], + "blocks": [ + { + "bbox": [ + 309, + 347, + 413, + 487 + ], + "lines": [ + { + "bbox": [ + 309, + 347, + 413, + 487 + ], + "spans": [ + { + "bbox": [ + 309, + 347, + 413, + 487 + ], + "type": "image", + "image_path": 
"9b26a02ec460020f269c9e957ef3f51615c8aec20faba332c7ad88bddff5fcea.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 413, + 347, + 521, + 487 + ], + "blocks": [ + { + "bbox": [ + 413, + 347, + 521, + 487 + ], + "lines": [ + { + "bbox": [ + 413, + 347, + 521, + 487 + ], + "spans": [ + { + "bbox": [ + 413, + 347, + 521, + 487 + ], + "type": "image", + "image_path": "a4d89254c371d49daaab63ab7c798533a278ce2f2d6e10bca6da553223761a32.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 97, + 496, + 503, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 496, + 503, + 510 + ], + "spans": [ + { + "bbox": [ + 97, + 496, + 503, + 510 + ], + "type": "text", + "content": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 94, + 518, + 212, + 604 + ], + "blocks": [ + { + "bbox": [ + 94, + 518, + 212, + 604 + ], + "lines": [ + { + "bbox": [ + 94, + 518, + 212, + 604 + ], + "spans": [ + { + "bbox": [ + 94, + 518, + 212, + 604 + ], + "type": "image", + "image_path": "023c34de6dd441f99db1b377780b4fdcd6666f8679ab43c519b68c515e5d0add.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 657, + 533, + 691 + ], + "lines": [ + { + "bbox": [ + 77, + 657, + 533, + 691 + ], + "spans": [ + { + "bbox": [ + 77, + 657, + 533, + 691 + ], + "type": "text", + "content": "Figure 59: Task: Image to X: Text detection. Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and CRAFT [3]. Observation: The results show that GPT-4o frequently generates text that does not exist." 
+ } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 214, + 529, + 295, + 590 + ], + "blocks": [ + { + "bbox": [ + 214, + 529, + 295, + 590 + ], + "lines": [ + { + "bbox": [ + 214, + 529, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 214, + 529, + 295, + 590 + ], + "type": "image", + "image_path": "9d85af9012642e27d9952f9795e008d868506f471396fa6b593f956b82ca6fd0.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 299, + 518, + 410, + 599 + ], + "blocks": [ + { + "bbox": [ + 299, + 518, + 410, + 599 + ], + "lines": [ + { + "bbox": [ + 299, + 518, + 410, + 599 + ], + "spans": [ + { + "bbox": [ + 299, + 518, + 410, + 599 + ], + "type": "image", + "image_path": "29eecd09cc58857aaac407a762e3d2472c1aae1724d08052e5c17f20df19b893.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 410, + 519, + 517, + 599 + ], + "blocks": [ + { + "bbox": [ + 410, + 519, + 517, + 599 + ], + "lines": [ + { + "bbox": [ + 410, + 519, + 517, + 599 + ], + "spans": [ + { + "bbox": [ + 410, + 519, + 517, + 599 + ], + "type": "image", + "image_path": "d4be8e3b705492a093a2511d862d76f96fe11049428146f4bcc9bedfb9524da6.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 99, + 609, + 503, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 609, + 503, + 622 + ], + "spans": [ + { + "bbox": [ + 99, + 609, + 503, + 622 + ], + "type": "text", + "content": "Input Text: \"Generate a new image and label each line of text in the image with a green box\"" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 627, + 170, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 627, + 170, + 639 + ], + "spans": [ + { + "bbox": [ + 113, + 627, + 170, + 639 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 232, + 627, + 265, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 627, + 265, + 638 + ], + "spans": [ + { + "bbox": [ + 232, + 627, + 265, + 638 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 318, + 627, + 391, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 627, + 391, + 638 + ], + "spans": [ + { + "bbox": [ + 318, + 627, + 391, + 638 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 445, + 627, + 477, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 445, + 627, + 477, + 638 + ], + "spans": [ + { + "bbox": [ + 445, + 627, + 477, + 638 + ], + "type": "text", + "content": "CRAFT" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "79" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 78 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 72, + 181, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 181, + 85 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 181, + 85 + ], + "type":
"text", + "content": "2.4.8 Object Tracking" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 91, + 533, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 91, + 533, + 224 + ], + "spans": [ + { + "bbox": [ + 76, + 91, + 533, + 224 + ], + "type": "text", + "content": "The object tracking task requires the model to continuously locate and follow the specific object across the frames in a video sequence. We test the multi-object tracking, which requires the model to track several objects concurrently. We test four cases (Figure 60, 61, 62, 63). We compare GPT-4o, Gemini 2.0 Flash, and a recent SOTA method SAM-2 [86]. Our first observation is that GPT-4o seems unable to generate images that are consistent with the original image. This may be related to the nature of its generative model. Even if we ignore this, for the tracking task, SAM-2 still performs better, while GPT-4o will have problems such as failing to maintain consistent tracking of the target, frequently drifting, or losing the object entirely. In Figure 60, the output of GPT-4o generally demonstrates the ability to track objects, but there are also some defects. For example, a new object is even created out of the existing objects in the last picture generated by GPT-4o. We speculate that this is caused by the influence of the conversation context. In Figure 61, GPT-4o outputs some content that should not be in the output, such as the \"caf\" tag. In Figure 62, GPT-4o can track a relatively simple object, but it fuses two separate objects. In Figure 63, GPT-4o lacks the capability of tracking in the dense scenario." + } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "80" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 79 + }, + { + "para_blocks": [ + { + "bbox": [ + 154, + 103, + 453, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 103, + 453, + 119 + ], + "spans": [ + { + "bbox": [ + 154, + 103, + 453, + 119 + ], + "type": "text", + "content": "Evaluation: Object Tracking, Matching and Video Analysis." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 97, + 123, + 194, + 219 + ], + "blocks": [ + { + "bbox": [ + 97, + 123, + 194, + 219 + ], + "lines": [ + { + "bbox": [ + 97, + 123, + 194, + 219 + ], + "spans": [ + { + "bbox": [ + 97, + 123, + 194, + 219 + ], + "type": "image", + "image_path": "64bca9f9e552b5aa559f2be8f97c1db4addda01d6db566245d3a344dc004fa2b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 203, + 124, + 299, + 219 + ], + "blocks": [ + { + "bbox": [ + 203, + 124, + 299, + 219 + ], + "lines": [ + { + "bbox": [ + 203, + 124, + 299, + 219 + ], + "spans": [ + { + "bbox": [ + 203, + 124, + 299, + 219 + ], + "type": "image", + "image_path": "4c521d5dabb23441105ce25f3e4ea4fe5cdfd75cbe5ba899f0fa357d7cbf73e0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 312, + 124, + 407, + 219 + ], + "blocks": [ + { + "bbox": [ + 312, + 124, + 407, + 219 + ], + "lines": [ + { + "bbox": [ + 312, + 124, + 407, + 219 + ], + "spans": [ + { + "bbox": [ + 312, + 124, + 407, + 219 + ], + "type": "image", + "image_path": "eabf80aab7589d05f040c765b5655db13fd6ccc0f85020e3be5daad50092f0f9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 417, + 124, + 513, + 219 + ], + "blocks": [ + { + "bbox": [ + 417, + 124, + 513, + 219 + ], + "lines": [ + { + "bbox": [ + 417, + 124, + 513, + 219 + ], + "spans": [ + { + "bbox": [ + 417, + 124, + 513, + 219 + ], + "type": "image", + "image_path": "075019b013e4717772a6d76e14fbaf9862c46b4f8223096c86fa73cb18a7fffb.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 97, + 220, + 506, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 220, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 97, + 220, + 506, + 255 + ], + "type": "text", + "content": "Input Text: \"This is the first frame of a video where I've marked four targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these four targets. 
Understood?\"" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 99, + 259, + 196, + 355 + ], + "blocks": [ + { + "bbox": [ + 99, + 259, + 196, + 355 + ], + "lines": [ + { + "bbox": [ + 99, + 259, + 196, + 355 + ], + "spans": [ + { + "bbox": [ + 99, + 259, + 196, + 355 + ], + "type": "image", + "image_path": "03ac5a08165418ccecfd0363ad1b72667a075da1b20d2f84dff1cf04027bdc79.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 203, + 259, + 300, + 355 + ], + "blocks": [ + { + "bbox": [ + 203, + 259, + 300, + 355 + ], + "lines": [ + { + "bbox": [ + 203, + 259, + 300, + 355 + ], + "spans": [ + { + "bbox": [ + 203, + 259, + 300, + 355 + ], + "type": "image", + "image_path": "6075fec13975411e989e0044e4a2f0ee1c763038760b1eee8ab0a205a97ce7fd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 312, + 259, + 407, + 355 + ], + "blocks": [ + { + "bbox": [ + 312, + 259, + 407, + 355 + ], + "lines": [ + { + "bbox": [ + 312, + 259, + 407, + 355 + ], + "spans": [ + { + "bbox": [ + 312, + 259, + 407, + 355 + ], + "type": "image", + "image_path": "2ed33745b63fd77dc89a8024a98963f50198ec8a1e52ceda4ab8062eb4b2085d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 417, + 259, + 513, + 355 + ], + "blocks": [ + { + "bbox": [ + 417, + 259, + 513, + 355 + ], + "lines": [ + { + "bbox": [ + 417, + 259, + 513, + 355 + ], + "spans": [ + { + "bbox": [ + 417, + 259, + 513, + 355 + ], + "type": "image", + "image_path": "9dff828e49740629bf2ee9f75d69169e08193f97717d5fed56fe0f6c1a5bcc42.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 97, + 357, + 494, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 357, + 494, + 392 + ], + "spans": [ + { + "bbox": [ + 97, + 357, + 494, + 392 + ], + "type": "text", + "content": "Input Text: \"You now need to perform object tracking on the four targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 99, + 395, + 195, + 491 + ], + "blocks": [ + { + "bbox": [ + 99, + 395, + 195, + 491 + ], + "lines": [ + { + "bbox": [ + 99, + 395, + 195, + 491 + ], + "spans": [ + { + "bbox": [ + 99, + 395, + 195, + 491 + ], + "type": "image", + "image_path": "679acf7cda0f660094595d465faedbcf9b28ca58cbf88966cb6e491aa278d75a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 704, + 533, + 750 + ], + "lines": [ + { + "bbox": [ + 77, + 704, + 533, + 750 + ], + "spans": [ + { + "bbox": [ + 77, + 704, + 533, + 750 + ], + "type": "text", + "content": "Figure 60: Task: Image to X: Object tracking, matching, and video analysis (1/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate an image that is consistent with the input image."
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 203, + 395, + 299, + 491 + ], + "blocks": [ + { + "bbox": [ + 203, + 395, + 299, + 491 + ], + "lines": [ + { + "bbox": [ + 203, + 395, + 299, + 491 + ], + "spans": [ + { + "bbox": [ + 203, + 395, + 299, + 491 + ], + "type": "image", + "image_path": "770b79989d7eeca4a1a1dfe41749ee7619d15b0809497750a4ef3f48ae31bf1c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 312, + 395, + 407, + 491 + ], + "blocks": [ + { + "bbox": [ + 312, + 395, + 407, + 491 + ], + "lines": [ + { + "bbox": [ + 312, + 395, + 407, + 491 + ], + "spans": [ + { + "bbox": [ + 312, + 395, + 407, + 491 + ], + "type": "image", + "image_path": "10f93dc20ac60dddd7d9e915fac07726f076d66e2f81a27aa151917e0e62abcf.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 417, + 396, + 513, + 491 + ], + "blocks": [ + { + "bbox": [ + 417, + 396, + 513, + 491 + ], + "lines": [ + { + "bbox": [ + 417, + 396, + 513, + 491 + ], + "spans": [ + { + "bbox": [ + 417, + 396, + 513, + 491 + ], + "type": "image", + "image_path": "8fc6d53c4cd8581616940efb6ac02413d37aece5c9462fc0de359572bfb0d570.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 97, + 494, + 501, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 494, + 501, + 529 + ], + "spans": [ + { + "bbox": [ + 97, + 494, + 501, + 529 + ], + "type": "text", + "content": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 99, + 532, + 195, + 627 + ], + "blocks": [ + { + "bbox": [ + 99, + 532, + 195, + 627 + ], + "lines": [ + { + "bbox": [ + 99, + 532, + 195, + 627 + ], + "spans": [ + { + "bbox": [ + 99, + 532, + 195, + 627 + ], + "type": "image", + "image_path": "5d220158cfe75b892d7a3627f6bbfe90c6327f1ca348ff2386cb8e06bc0bd166.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 203, + 532, + 299, + 627 + ], + "blocks": [ + { + "bbox": [ + 203, + 532, + 299, + 627 + ], + "lines": [ + { + "bbox": [ + 203, + 532, + 299, + 627 + ], + "spans": [ + { + "bbox": [ + 203, + 532, + 299, + 627 + ], + "type": "image", + "image_path": "e5aa20f742421c44599c060ff033d5084778874fc965254ee47e49189abed76d.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 312, + 532, + 407, + 627 + ], + "blocks": [ + { + "bbox": [ + 312, + 532, + 407, + 627 + ], + "lines": [ + { + "bbox": [ + 312, + 532, + 407, + 627 + ], + "spans": [ + { + "bbox": [ + 312, + 532, + 407, + 627 + ], + "type": "image", + "image_path": "9a9ede0bf4fe178006bfdab612511041503aa542b158bd5a1cafebee66aa0d19.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 417, + 532, + 513, + 627 + ], + "blocks": [ + { + "bbox": [ + 417, + 532, + 513, + 627 + ], + "lines": [ + { + "bbox": [ + 417, + 532, + 513, + 627 + ], + "spans": [ + { + "bbox": [ + 417, + 532, + 513, + 627 + ], + "type": "image", + "image_path": "4cb8d78d25db9472f21ac15da10a306c42418c98f395c4de73dab05c66763de4.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 97, + 634, + 501, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 634, + 501, + 669 + ], + "spans": [ + { + "bbox": [ + 97, + 634, + 501, + 669 + ], + "type": "text", + "content": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 117, + 673, + 173, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 673, + 173, + 686 + ], + "spans": [ + { + "bbox": [ + 117, + 673, + 173, + 686 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 235, + 673, + 267, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 673, + 267, + 684 + ], + "spans": [ + { + "bbox": [ + 235, + 673, + 267, + 684 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 323, + 673, + 395, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 673, + 395, + 684 + ], + "spans": [ + { + "bbox": [ + 323, + 673, + 395, + 684 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 456, + 673, + 484, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 456, + 673, + 484, + 683 + ], + "spans": [ + { + "bbox": [ + 456, + 673, + 484, + 683 + ], + "type": "text", + "content": "SAM2" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 121, + 79, + 181, + 91 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 79, + 181, + 91 + ], + "spans": [ + { + "bbox": [ + 121, + 79, + 181, + 91 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "81" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 80 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 145, + 181, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 145, + 181, + 159 + ], + "spans": [ + { + "bbox": [ + 121, + 145, + 181, + 159 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 154, + 168, + 454, + 183 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 168, + 454, + 183 + ], + "spans": [ + { + "bbox": [ + 154, + 168, + 454, + 183 + ], + "type": "text", + "content": "Evaluation: Object Tracking, Matching and Video Analysis."
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 97, + 191, + 196, + 251 + ], + "blocks": [ + { + "bbox": [ + 97, + 191, + 196, + 251 + ], + "lines": [ + { + "bbox": [ + 97, + 191, + 196, + 251 + ], + "spans": [ + { + "bbox": [ + 97, + 191, + 196, + 251 + ], + "type": "image", + "image_path": "123381a94924d4ad9fab326af061828f48aab18ef9bee3847f59fc56015e22fb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 203, + 191, + 301, + 250 + ], + "blocks": [ + { + "bbox": [ + 203, + 191, + 301, + 250 + ], + "lines": [ + { + "bbox": [ + 203, + 191, + 301, + 250 + ], + "spans": [ + { + "bbox": [ + 203, + 191, + 301, + 250 + ], + "type": "image", + "image_path": "17ab50440ba1a2025044fd3cd0741266c996928780e88a97d4c4333b56dfbe5a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 310, + 191, + 409, + 251 + ], + "blocks": [ + { + "bbox": [ + 310, + 191, + 409, + 251 + ], + "lines": [ + { + "bbox": [ + 310, + 191, + 409, + 251 + ], + "spans": [ + { + "bbox": [ + 310, + 191, + 409, + 251 + ], + "type": "image", + "image_path": "b44b50f9572bbd72865bce64600e8f10dc36affa85e764b23905c4db84176b3f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 416, + 191, + 515, + 250 + ], + "blocks": [ + { + "bbox": [ + 416, + 191, + 515, + 250 + ], + "lines": [ + { + "bbox": [ + 416, + 191, + 515, + 250 + ], + "spans": [ + { + "bbox": [ + 416, + 191, + 515, + 250 + ], + "type": "image", + "image_path": "1bdda49b07b096341fdec95b3c342e834c5c3255bfacb631d015bf9dcbf57d2b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 97, + 251, + 497, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 251, + 497, + 285 + ], + "spans": [ + { + "bbox": [ + 97, + 251, + 497, + 285 + ], + "type": "text", + "content": "Input Text: \"This is the first frame of a video where I've marked three targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these three targets. 
Understood?\"" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 99, + 288, + 196, + 344 + ], + "blocks": [ + { + "bbox": [ + 99, + 288, + 196, + 344 + ], + "lines": [ + { + "bbox": [ + 99, + 288, + 196, + 344 + ], + "spans": [ + { + "bbox": [ + 99, + 288, + 196, + 344 + ], + "type": "image", + "image_path": "f266fde975c8808d3f3339c241e64e53840be40a286abf4b782d7dec9c606c51.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 203, + 288, + 299, + 345 + ], + "blocks": [ + { + "bbox": [ + 203, + 288, + 299, + 345 + ], + "lines": [ + { + "bbox": [ + 203, + 288, + 299, + 345 + ], + "spans": [ + { + "bbox": [ + 203, + 288, + 299, + 345 + ], + "type": "image", + "image_path": "6bd20b3ebf4a99d1a659fb4acf85915aae6729d1dae0be2eba9584761c9e9db7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 288, + 408, + 345 + ], + "blocks": [ + { + "bbox": [ + 310, + 288, + 408, + 345 + ], + "lines": [ + { + "bbox": [ + 310, + 288, + 408, + 345 + ], + "spans": [ + { + "bbox": [ + 310, + 288, + 408, + 345 + ], + "type": "image", + "image_path": "19ee65abb68b04bcc6de8f3231bd1dfc53040c57e2774f2a64269f4ba9efb4df.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 416, + 288, + 514, + 345 + ], + "blocks": [ + { + "bbox": [ + 416, + 288, + 514, + 345 + ], + "lines": [ + { + "bbox": [ + 416, + 288, + 514, + 345 + ], + "spans": [ + { + "bbox": [ + 416, + 288, + 514, + 345 + ], + "type": "image", + "image_path": "2864e1c72caa29f54357f917cad03463a7b5f8e666314b153768cc9c9bd444d6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 97, + 346, + 499, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 346, + 499, + 380 + ], + "spans": [ + { + "bbox": [ + 97, + 346, + 499, + 380 + ], + "type": "text", + "content": "Input Text: \"You now need to perform object tracking on the three targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 99, + 383, + 196, + 440 + ], + "blocks": [ + { + "bbox": [ + 99, + 383, + 196, + 440 + ], + "lines": [ + { + "bbox": [ + 99, + 383, + 196, + 440 + ], + "spans": [ + { + "bbox": [ + 99, + 383, + 196, + 440 + ], + "type": "image", + "image_path": "17281215ee551aee19132f26e2f57e9634fc4c01686dbd233f99e38afdc31bac.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 607, + 533, + 653 + ], + "lines": [ + { + "bbox": [ + 77, + 607, + 533, + 653 + ], + "spans": [ + { + "bbox": [ + 77, + 607, + 533, + 653 + ], + "type": "text", + "content": "Figure 61: Task: Image to X: Object tracking, matching, and video analysis (2/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 203, + 383, + 299, + 440 + ], + "blocks": [ + { + "bbox": [ + 203, + 383, + 299, + 440 + ], + "lines": [ + { + "bbox": [ + 203, + 383, + 299, + 440 + ], + "spans": [ + { + "bbox": [ + 203, + 383, + 299, + 440 + ], + "type": "image", + "image_path": "dd4f8ceb102f253e19dc4e99a539f2e28701caecb904ac3596b116d8f9213bed.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 310, + 383, + 408, + 440 + ], + "blocks": [ + { + "bbox": [ + 310, + 383, + 408, + 440 + ], + "lines": [ + { + "bbox": [ + 310, + 383, + 408, + 440 + ], + "spans": [ + { + "bbox": [ + 310, + 383, + 408, + 440 + ], + "type": "image", + "image_path": "288c7e6c2ba7c4828a38dbb2d93551531e7662268ca80529efd05e46c3b9cdc5.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 416, + 383, + 514, + 440 + ], + "blocks": [ + { + "bbox": [ + 416, + 383, + 514, + 440 + ], + "lines": [ + { + "bbox": [ + 416, + 383, + 514, + 440 + ], + "spans": [ + { + "bbox": [ + 416, + 383, + 514, + 440 + ], + "type": "image", + "image_path": "b50fd5a292c370336aaa3e339bafb070bea7a7a2f80c934dad264af03b23bb2d.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 97, + 441, + 501, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 441, + 501, + 475 + ], + "spans": [ + { + "bbox": [ + 97, + 441, + 501, + 475 + ], + "type": "text", + "content": "Input Text: \"Continue tracking the three targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 99, + 478, + 196, + 535 + ], + "blocks": [ + { + "bbox": [ + 99, + 478, + 196, + 535 + ], + "lines": [ + { + "bbox": [ + 99, + 478, + 196, + 535 + ], + "spans": [ + { + "bbox": [ + 99, + 478, + 196, + 535 + ], + "type": "image", + "image_path": "f835ef19b16a59831866bcf40d9d9bb5ac713048e65f176650e206cc3a2dd8ec.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 203, + 478, + 299, + 535 + ], + "blocks": [ + { + "bbox": [ + 203, + 478, + 299, + 535 + ], + "lines": [ + { + "bbox": [ + 203, + 478, + 299, + 535 + ], + "spans": [ + { + "bbox": [ + 203, + 478, + 299, + 535 + ], + "type": "image", + "image_path": "0be63390ebc394d88f7518d11a9497eca8059c414bc31788dd9f9bad0ee7ee64.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 310, + 478, + 408, + 535 + ], + "blocks": [ + { + "bbox": [ + 310, + 478, + 408, + 535 + ], + "lines": [ + { + "bbox": [ + 310, + 478, + 408, + 535 + ], + "spans": [ + { + "bbox": [ + 310, + 478, + 408, + 535 + ], + "type": "image", + "image_path": "b09f487669528fc2963ebabd0310ed69c5b420b7512e220fe3465e3c2ed5e12b.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 416, + 478, + 512, + 535 + ], + "blocks": [ + { + "bbox": [ + 416, + 478, + 512, + 535 + ], + "lines": [ + { + "bbox": [ + 416, + 478, + 512, + 535 + ], + "spans": [ + { + "bbox": [ + 416, + 478, + 512, + 535 + ], + "type": "image", + "image_path": "1f674c14a05985ac60abfb2449ee82311b3fd74a5ce3e0baab98c7fbcc2f60c0.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 97, + 537, + 501, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 537, + 501, + 573 + ], + "spans": [ + { + "bbox": [ + 97, + 537, + 501, + 573 + ], + "type": "text", + "content": "Input Text: \"Continue tracking the three targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 118, + 578, + 175, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 578, + 175, + 590 + ], + "spans": [ + { + "bbox": [ + 118, + 578, + 175, + 590 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 235, + 578, + 267, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 578, + 267, + 588 + ], + "spans": [ + { + "bbox": [ + 235, + 578, + 267, + 588 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 322, + 578, + 394, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 578, + 394, + 588 + ], + "spans": [ + { + "bbox": [ + 322, + 578, + 394, + 588 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 450, + 578, + 478, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 450, + 578, + 478, + 588 + ], + "spans": [ + { + "bbox": [ + 450, + 578, + 478, + 588 + ], + "type": "text", + "content": "SAM2" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "82" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 81 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 175, + 180, + 187 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 175, + 180, + 187 + ], + "spans": [ + { + "bbox": [ + 121, + 175, + 180, + 187 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 154, + 196, + 166, + 210 + ], + "blocks": [ + { + "bbox": [ + 154, + 196, + 166, + 210 + ], + "lines": [ + { + "bbox": [ + 154, + 196, + 166, + 210 + ], + "spans": [ + { + "bbox": [ + 154, + 196, + 166, + 210 + ], + "type": "image", + "image_path": "2d588085b2425d35e86ead8fd151a9f1b0a00f306e88ed5971910bddaaa8e8b1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 167, + 199, + 452, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 199, + 452, + 213 + ], + "spans": [ + { + "bbox": [ + 167, + 199, + 452, + 213 + ], + "type": "text", + "content": "Evaluation: Object Tracking, Matching and Video Analysis."
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 97, + 217, + 197, + 260 + ], + "blocks": [ + { + "bbox": [ + 97, + 217, + 197, + 260 + ], + "lines": [ + { + "bbox": [ + 97, + 217, + 197, + 260 + ], + "spans": [ + { + "bbox": [ + 97, + 217, + 197, + 260 + ], + "type": "image", + "image_path": "a1e959570f347e3fef4bcc4ccda36cec01a4c6a5cdd272c2a85fbb3cc2dea20d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 203, + 217, + 302, + 261 + ], + "blocks": [ + { + "bbox": [ + 203, + 217, + 302, + 261 + ], + "lines": [ + { + "bbox": [ + 203, + 217, + 302, + 261 + ], + "spans": [ + { + "bbox": [ + 203, + 217, + 302, + 261 + ], + "type": "image", + "image_path": "c2af40c5244e0f72a0326fc6c92a7e15810a0e852285d96d92e3191254cf5eaa.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 309, + 217, + 407, + 261 + ], + "blocks": [ + { + "bbox": [ + 309, + 217, + 407, + 261 + ], + "lines": [ + { + "bbox": [ + 309, + 217, + 407, + 261 + ], + "spans": [ + { + "bbox": [ + 309, + 217, + 407, + 261 + ], + "type": "image", + "image_path": "ddca6e48a5804db47d5999c3e273c4763a7ecac6ea67d75a092e6a71ab128b72.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 414, + 217, + 513, + 261 + ], + "blocks": [ + { + "bbox": [ + 414, + 217, + 513, + 261 + ], + "lines": [ + { + "bbox": [ + 414, + 217, + 513, + 261 + ], + "spans": [ + { + "bbox": [ + 414, + 217, + 513, + 261 + ], + "type": "image", + "image_path": "946427a8be6f45ad8290603f01d85b6ddddd4e891e723686e136a789ad2d67dc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 96, + 262, + 505, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 262, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 96, + 262, + 505, + 297 + ], + "type": "text", + "content": "Input Text: \"This is the first frame of a video where I've marked four targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these four targets. 
Understood?\"" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 97, + 300, + 197, + 343 + ], + "blocks": [ + { + "bbox": [ + 97, + 300, + 197, + 343 + ], + "lines": [ + { + "bbox": [ + 97, + 300, + 197, + 343 + ], + "spans": [ + { + "bbox": [ + 97, + 300, + 197, + 343 + ], + "type": "image", + "image_path": "9e99b81570d109732b42b178967ae11e9ed7d965809fd7f2fd60d19ccbc7cd6d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 203, + 300, + 302, + 343 + ], + "blocks": [ + { + "bbox": [ + 203, + 300, + 302, + 343 + ], + "lines": [ + { + "bbox": [ + 203, + 300, + 302, + 343 + ], + "spans": [ + { + "bbox": [ + 203, + 300, + 302, + 343 + ], + "type": "image", + "image_path": "d39af4eb408498b81d98d8cb2ffed5b472bc1b7317c2c8a5374a4a56e4dfb056.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 300, + 407, + 343 + ], + "blocks": [ + { + "bbox": [ + 309, + 300, + 407, + 343 + ], + "lines": [ + { + "bbox": [ + 309, + 300, + 407, + 343 + ], + "spans": [ + { + "bbox": [ + 309, + 300, + 407, + 343 + ], + "type": "image", + "image_path": "9682ba919f23fd2efddf43910798451e8e7f75415d38ed0e818f82b242e57922.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 414, + 300, + 513, + 343 + ], + "blocks": [ + { + "bbox": [ + 414, + 300, + 513, + 343 + ], + "lines": [ + { + "bbox": [ + 414, + 300, + 513, + 343 + ], + "spans": [ + { + "bbox": [ + 414, + 300, + 513, + 343 + ], + "type": "image", + "image_path": "1cf0ebdbd8d076d3769a4ec23e0b1f57389127e1424e5db6a56d16c052155351.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 96, + 345, + 495, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 345, + 495, + 379 + ], + "spans": [ + { + "bbox": [ + 96, + 345, + 495, + 379 + ], + "type": "text", + "content": "Input Text: \"You now need to perform object tracking on the four targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 97, + 380, + 197, + 422 + ], + "blocks": [ + { + "bbox": [ + 97, + 380, + 197, + 422 + ], + "lines": [ + { + "bbox": [ + 97, + 380, + 197, + 422 + ], + "spans": [ + { + "bbox": [ + 97, + 380, + 197, + 422 + ], + "type": "image", + "image_path": "f90e61bd1f023e6f33aef6353e15aca3f50253a83831ca3012b01fddcbeee732.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 577, + 533, + 622 + ], + "lines": [ + { + "bbox": [ + 77, + 577, + 533, + 622 + ], + "spans": [ + { + "bbox": [ + 77, + 577, + 533, + 622 + ], + "type": "text", + "content": "Figure 62: Task: Image to X: Object tracking, matching, and video analysis (3/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." 
+ } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 203, + 380, + 302, + 422 + ], + "blocks": [ + { + "bbox": [ + 203, + 380, + 302, + 422 + ], + "lines": [ + { + "bbox": [ + 203, + 380, + 302, + 422 + ], + "spans": [ + { + "bbox": [ + 203, + 380, + 302, + 422 + ], + "type": "image", + "image_path": "cb1c6099278ab7785abb32f0832616b8e30bed5c7c7f8049237a2dfc3dbc2c4a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 309, + 380, + 408, + 422 + ], + "blocks": [ + { + "bbox": [ + 309, + 380, + 408, + 422 + ], + "lines": [ + { + "bbox": [ + 309, + 380, + 408, + 422 + ], + "spans": [ + { + "bbox": [ + 309, + 380, + 408, + 422 + ], + "type": "image", + "image_path": "c437cfe35c54814ac08dea9c46d0e6240fb84f38e9174f214d7a634c29fe2495.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 414, + 380, + 513, + 422 + ], + "blocks": [ + { + "bbox": [ + 414, + 380, + 513, + 422 + ], + "lines": [ + { + "bbox": [ + 414, + 380, + 513, + 422 + ], + "spans": [ + { + "bbox": [ + 414, + 380, + 513, + 422 + ], + "type": "image", + "image_path": "4973b35320e8a2b303648fca1abc0044f2a37f44f7b47f898eb02a0074a2a7ba.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 96, + 425, + 501, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 425, + 501, + 460 + ], + "spans": [ + { + "bbox": [ + 96, + 425, + 501, + 460 + ], + "type": "text", + "content": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 97, + 462, + 197, + 506 + ], + "blocks": [ + { + "bbox": [ + 97, + 462, + 197, + 506 + ], + "lines": [ + { + "bbox": [ + 97, + 462, + 197, + 506 + ], + "spans": [ + { + "bbox": [ + 97, + 462, + 197, + 506 + ], + "type": "image", + "image_path": "85f7bef2b593291c46513388cafafe9023a5e4f53bc7397e3a888a28d2cda80d.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 203, + 462, + 302, + 506 + ], + "blocks": [ + { + "bbox": [ + 203, + 462, + 302, + 506 + ], + "lines": [ + { + "bbox": [ + 203, + 462, + 302, + 506 + ], + "spans": [ + { + "bbox": [ + 203, + 462, + 302, + 506 + ], + "type": "image", + "image_path": "a5b89b647f49cafe186c5a2f230a7d551fedbe9a4b6ab523991f91c5baceb76f.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 309, + 462, + 408, + 506 + ], + "blocks": [ + { + "bbox": [ + 309, + 462, + 408, + 506 + ], + "lines": [ + { + "bbox": [ + 309, + 462, + 408, + 506 + ], + "spans": [ + { + "bbox": [ + 309, + 462, + 408, + 506 + ], + "type": "image", + "image_path": "21754ab30160b46ae72d81bb54a4d2c4f2af0b2cb67b22e08bf3c41755442754.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 414, + 462, + 513, + 506 + ], + "blocks": [ + { + "bbox": [ + 414, + 462, + 513, + 506 + ], + "lines": [ + { + "bbox": [ + 414, + 462, + 513, + 506 + ], + "spans": [ + { + "bbox": [ + 414, + 462, + 513, + 506 + ], + "type": "image", + "image_path": "d0e994754d3ed22f81935820cf17a41860327c42094e23bb6b7a579509ca0543.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 96, + 509, + 501, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 509, + 501, + 544 + ], + "spans": [ + { + "bbox": [ + 96, + 509, + 501, + 544 + ], + "type": "text", + "content": "Input Text: \"Continue tracking the four targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 121, + 548, + 177, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 548, + 177, + 560 + ], + "spans": [ + { + "bbox": [ + 121, + 548, + 177, + 560 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 241, + 548, + 272, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 548, + 272, + 559 + ], + "spans": [ + { + "bbox": [ + 241, + 548, + 272, + 559 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 322, + 548, + 394, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 548, + 394, + 559 + ], + "spans": [ + { + "bbox": [ + 322, + 548, + 394, + 559 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 451, + 549, + 479, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 451, + 549, + 479, + 559 + ], + "spans": [ + { + "bbox": [ + 451, + 549, + 479, + 559 + ], + "type": "text", + "content": "SAM2" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "83" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 82 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 149, + 180, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 149, + 180, + 163 + ], + "spans": [ + { + "bbox": [ + 121, + 149, + 180, + 163 + ], + "type": "text", + "content": "Image-to-X" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 154, + 170, + 454, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 170, + 454, + 185 + ], + "spans": [ + { + "bbox": [ + 154, + 170, + 454, + 185 + ], + "type": "text", + "content": "Evaluation: Object Tracking, Matching and Video Analysis."
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 97, + 191, + 197, + 249 + ], + "blocks": [ + { + "bbox": [ + 97, + 191, + 197, + 249 + ], + "lines": [ + { + "bbox": [ + 97, + 191, + 197, + 249 + ], + "spans": [ + { + "bbox": [ + 97, + 191, + 197, + 249 + ], + "type": "image", + "image_path": "9801dce9b0ee5070b386427554ed58b6a513f89be080d33cee85238e85084ce4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 203, + 191, + 302, + 249 + ], + "blocks": [ + { + "bbox": [ + 203, + 191, + 302, + 249 + ], + "lines": [ + { + "bbox": [ + 203, + 191, + 302, + 249 + ], + "spans": [ + { + "bbox": [ + 203, + 191, + 302, + 249 + ], + "type": "image", + "image_path": "47c0aa44b5c1e668ed4e267b28409511fe44987d1f237b43ee033cba301e51ee.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 191, + 408, + 249 + ], + "blocks": [ + { + "bbox": [ + 309, + 191, + 408, + 249 + ], + "lines": [ + { + "bbox": [ + 309, + 191, + 408, + 249 + ], + "spans": [ + { + "bbox": [ + 309, + 191, + 408, + 249 + ], + "type": "image", + "image_path": "c5d213403e828619e550ddf5ada406e961fa2efb34275366ed904f09ea4b9216.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 413, + 191, + 513, + 249 + ], + "blocks": [ + { + "bbox": [ + 413, + 191, + 513, + 249 + ], + "lines": [ + { + "bbox": [ + 413, + 191, + 513, + 249 + ], + "spans": [ + { + "bbox": [ + 413, + 191, + 513, + 249 + ], + "type": "image", + "image_path": "13971dbe9a9a59527de67d029db2291646e369394e8bee5fe2ff780074999bab.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 96, + 250, + 499, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 250, + 499, + 283 + ], + "spans": [ + { + "bbox": [ + 96, + 250, + 499, + 283 + ], + "type": "text", + "content": "Input Text: \"This is the first frame of a video where I've marked six targets with different colored bounding boxes. I'll subsequently provide you with other frames from this video for object tracking of these six targets. 
Understood?\"" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 98, + 285, + 197, + 342 + ], + "blocks": [ + { + "bbox": [ + 98, + 285, + 197, + 342 + ], + "lines": [ + { + "bbox": [ + 98, + 285, + 197, + 342 + ], + "spans": [ + { + "bbox": [ + 98, + 285, + 197, + 342 + ], + "type": "image", + "image_path": "4f4a54c95a2c61f0b917046158f22c0ac2fb4eaa7c0013af0028c8b141cd8788.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 203, + 285, + 302, + 342 + ], + "blocks": [ + { + "bbox": [ + 203, + 285, + 302, + 342 + ], + "lines": [ + { + "bbox": [ + 203, + 285, + 302, + 342 + ], + "spans": [ + { + "bbox": [ + 203, + 285, + 302, + 342 + ], + "type": "image", + "image_path": "11029002e14154e37a105bc63a9acff74ed50d918df4d80c2a9f039305b6e741.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 309, + 285, + 407, + 342 + ], + "blocks": [ + { + "bbox": [ + 309, + 285, + 407, + 342 + ], + "lines": [ + { + "bbox": [ + 309, + 285, + 407, + 342 + ], + "spans": [ + { + "bbox": [ + 309, + 285, + 407, + 342 + ], + "type": "image", + "image_path": "915695ce6be4cdda6f09d7deabc426642440f267759651fc6ec01c427928bd50.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 414, + 285, + 512, + 342 + ], + "blocks": [ + { + "bbox": [ + 414, + 285, + 512, + 342 + ], + "lines": [ + { + "bbox": [ + 414, + 285, + 512, + 342 + ], + "spans": [ + { + "bbox": [ + 414, + 285, + 512, + 342 + ], + "type": "image", + "image_path": "2b470b9a2e3376254260fb3776461599c8d766e1b076eaec1370e72b4b2e5b06.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 96, + 343, + 495, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 343, + 495, + 377 + ], + "spans": [ + { + "bbox": [ + 96, + 343, + 495, + 377 + ], + "type": "text", + "content": "Input Text: \"You now need to perform object tracking on the six targets in this image and draw the detected bounding boxes on them. Please provide me directly with the final output image. Return result image by using image generation.\"" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 98, + 380, + 197, + 437 + ], + "blocks": [ + { + "bbox": [ + 98, + 380, + 197, + 437 + ], + "lines": [ + { + "bbox": [ + 98, + 380, + 197, + 437 + ], + "spans": [ + { + "bbox": [ + 98, + 380, + 197, + 437 + ], + "type": "image", + "image_path": "4610ae5599e3f85d65d10713443204f4af850e0751404e619cfc018f179db18b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 604, + 533, + 648 + ], + "lines": [ + { + "bbox": [ + 77, + 604, + 533, + 648 + ], + "spans": [ + { + "bbox": [ + 77, + 604, + 533, + 648 + ], + "type": "text", + "content": "Figure 63: Task: Image to X: Object tracking, matching, and video analysis (4/4). Setup: Each row shows an input image and a text prompt with outputs from GPT-4o, Gemini 2.0 Flash [99], and SAM-2 [86]. Observation: This evaluation shows that GPT-4o has the capability of tracking objects, but it cannot generate a consistent image compared to the input image." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 203, + 380, + 302, + 437 + ], + "blocks": [ + { + "bbox": [ + 203, + 380, + 302, + 437 + ], + "lines": [ + { + "bbox": [ + 203, + 380, + 302, + 437 + ], + "spans": [ + { + "bbox": [ + 203, + 380, + 302, + 437 + ], + "type": "image", + "image_path": "17df35e7ffc6198a7ee15be18d8062a7c6c3846eac54441a5167179ad1116281.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 309, + 380, + 408, + 437 + ], + "blocks": [ + { + "bbox": [ + 309, + 380, + 408, + 437 + ], + "lines": [ + { + "bbox": [ + 309, + 380, + 408, + 437 + ], + "spans": [ + { + "bbox": [ + 309, + 380, + 408, + 437 + ], + "type": "image", + "image_path": "3979db3e010b09be23fdd79665b9946d9fa4adc89d8766528d7afd7b653cf603.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 414, + 380, + 512, + 437 + ], + "blocks": [ + { + "bbox": [ + 414, + 380, + 512, + 437 + ], + "lines": [ + { + "bbox": [ + 414, + 380, + 512, + 437 + ], + "spans": [ + { + "bbox": [ + 414, + 380, + 512, + 437 + ], + "type": "image", + "image_path": "136b68479499a83964864cd84b1bd318f9fd41257d6d18e9f8d976b00b3c4581.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 96, + 439, + 501, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 439, + 501, + 474 + ], + "spans": [ + { + "bbox": [ + 96, + 439, + 501, + 474 + ], + "type": "text", + "content": "Input Text: \"Continue tracking the six targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 99, + 476, + 197, + 533 + ], + "blocks": [ + { + "bbox": [ + 99, + 476, + 197, + 533 + ], + "lines": [ + { + "bbox": [ + 99, + 476, + 197, + 533 + ], + "spans": [ + { + "bbox": [ + 99, + 476, + 197, + 533 + ], + "type": "image", + "image_path": "ee95015664fc94518401d21a2920ffd79e12d1ce90aaee7e7c2e23d8fb252353.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 203, + 476, + 302, + 533 + ], + "blocks": [ + { + "bbox": [ + 203, + 476, + 302, + 533 + ], + "lines": [ + { + "bbox": [ + 203, + 476, + 302, + 533 + ], + "spans": [ + { + "bbox": [ + 203, + 476, + 302, + 533 + ], + "type": "image", + "image_path": "9bb8f63af64ed04966c8891cf4cf6ec49bd44f0d1a11eca0e431b45d7918994e.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 308, + 476, + 408, + 533 + ], + "blocks": [ + { + "bbox": [ + 308, + 476, + 408, + 533 + ], + "lines": [ + { + "bbox": [ + 308, + 476, + 408, + 533 + ], + "spans": [ + { + "bbox": [ + 308, + 476, + 408, + 533 + ], + "type": "image", + "image_path": "b2936841b119f7c4783c46da417c26650df828319071a28b934df3513e885121.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 413, + 476, + 512, + 533 + ], + "blocks": [ + { + "bbox": [ + 413, + 476, + 512, + 533 + ], + "lines": [ + { + "bbox": [ + 413, + 476, + 512, + 533 + ], + "spans": [ + { + "bbox": [ + 413, + 476, + 512, + 533 + ], + "type": "image", + "image_path": "a093ca2444e0583bf58d16a259862c8f04e828919d40f21a081d04a7116fc357.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 96, + 534, + 501, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 534, + 501, + 568 + ], + "spans": [ + { + "bbox": [ + 96, + 534, + 501, + 568 + ], + "type": "text", + "content": "Input Text: \"Continue tracking the six targets on this new frame and draw the detected bounding boxes on them. Please provide me directly with the final output image. 
Return result image by using image generation.\"" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 121, + 574, + 178, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 574, + 178, + 586 + ], + "spans": [ + { + "bbox": [ + 121, + 574, + 178, + 586 + ], + "type": "text", + "content": "Input Image" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 236, + 574, + 269, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 574, + 269, + 584 + ], + "spans": [ + { + "bbox": [ + 236, + 574, + 269, + 584 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 322, + 574, + 394, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 574, + 394, + 584 + ], + "spans": [ + { + "bbox": [ + 322, + 574, + 394, + 584 + ], + "type": "text", + "content": "Gemini 2.0 Flash" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 449, + 574, + 477, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 449, + 574, + 477, + 584 + ], + "spans": [ + { + "bbox": [ + 449, + 574, + 477, + 584 + ], + "type": "text", + "content": "SAM2" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "84" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 83 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 71, + 159, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 71, + 159, + 83 + ], + "spans": [ + { + "bbox": [ + 78, + 71, + 159, + 83 + ], + "type": "text", + "content": "3 Limitations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 96, + 532, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 96, + 532, + 131 + ], + "spans": [ + { + "bbox": [ + 77, + 96, + 532, + 131 + ], + "type": "text", + "content": "Although GPT-4o demonstrates impressive capabilities across a wide range of image generation tasks, several limitations remain. These challenges highlight key areas for future improvement in developing unified foundation models for vision-language generation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 78, + 144, + 206, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 144, + 206, + 155 + ], + "spans": [ + { + "bbox": [ + 78, + 144, + 206, + 155 + ], + "type": "text", + "content": "3.1 Inconsistent Generation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 164, + 531, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 164, + 531, + 242 + ], + "spans": [ + { + "bbox": [ + 77, + 164, + 531, + 242 + ], + "type": "text", + "content": "While GPT-4o often produces high-quality and semantically relevant images conditioned on textual prompts, it occasionally exhibits inconsistencies. Specifically, the model may generate visually compelling outputs that deviate from precise semantic cues of the input image, such as object count, spatial layout, specific shapes, or designated colors. These inconsistencies are especially problematic in tasks requiring partial image editing or compositional accuracy.
Notably, such issues are less common in diffusion-based models or discrete denoising architectures like MaskGIT [11, 6], suggesting that GPT-4o operates under a distinct generative paradigm with inherent trade-offs in fidelity and control." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 255, + 162, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 255, + 162, + 266 + ], + "spans": [ + { + "bbox": [ + 78, + 255, + 162, + 266 + ], + "type": "text", + "content": "3.2 Hallucination" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 276, + 531, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 276, + 531, + 344 + ], + "spans": [ + { + "bbox": [ + 77, + 276, + 531, + 344 + ], + "type": "text", + "content": "GPT-4o is also susceptible to hallucinations—producing content that is logically implausible, semantically inconsistent, or factually incorrect. These include fabricating non-existent objects or geographical features (e.g., imaginary islands or landmarks), and misrepresenting relationships between entities. Such errors are particularly prevalent in complex or underspecified prompts, where the model appears to rely on internal priors rather than grounded world knowledge. While hallucination is a common challenge across generative models, it poses notable limitations for real-world applications demanding precision, such as education, medical illustration, or scientific visualization." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 78, + 357, + 146, + 367 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 357, + 146, + 367 + ], + "spans": [ + { + "bbox": [ + 78, + 357, + 146, + 367 + ], + "type": "text", + "content": "3.3 Data Bias" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 77, + 376, + 532, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 376, + 532, + 498 + ], + "spans": [ + { + "bbox": [ + 77, + 376, + 532, + 498 + ], + "type": "text", + "content": "Despite strong alignment between text and vision modalities, GPT-4o struggles with data bias, failing to generate underrepresented cultural elements and to render non-Latin scripts such as Chinese, Japanese, and Arabic. The generated characters are often incomplete, distorted, or replaced with Latin-like approximations. These artifacts reflect underlying challenges in multilingual representation, likely due to limited exposure to diverse scripts during training and the inherent difficulty of accurate typographic rendering in pixel space. This phenomenon is emblematic of a larger issue in AI systems—data bias. The training data used to develop models like GPT-4o may disproportionately represent certain languages, cultures, and writing systems, leading to disparities in performance across different linguistic groups. These biases are not only technical limitations but also ethical concerns, as they can contribute to the exclusion of underrepresented languages and cultures from AI applications. As vision-language models are increasingly deployed globally, improving support for multilingual text remains a crucial step toward inclusive and culturally competent AI systems."
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 77, + 515, + 157, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 515, + 157, + 528 + ], + "spans": [ + { + "bbox": [ + 77, + 515, + 157, + 528 + ], + "type": "text", + "content": "4 Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 77, + 540, + 531, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 540, + 531, + 640 + ], + "spans": [ + { + "bbox": [ + 77, + 540, + 531, + 640 + ], + "type": "text", + "content": "In conclusion, this work presents a comprehensive study on the development of unified vision-language generative models, with a focus on evaluating GPT-4o across a wide range of image generation tasks. Our analysis shows that GPT-4o demonstrates strong capabilities in aligning vision and language, achieving competitive results across text-to-image, image-to-image, image-to-3D, and image-to-X tasks. However, limitations remain in inconsistent generation, hallucination, and data bias in underrepresented cultural elements and non-Latin scripts, highlighting current trade-offs in model design and training data coverage. We also emphasize that architecture alone does not determine success; training data, model scale, and optimization strategies are equally critical components of progress. We hope future work will provide deeper empirical insights into such proprietary systems and clarify their position within the broader landscape of unified generative modeling." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 78, + 654, + 137, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 654, + 137, + 667 + ], + "spans": [ + { + "bbox": [ + 78, + 654, + 137, + 667 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 86, + 676, + 531, + 722 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 86, + 676, + 531, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 676, + 531, + 708 + ], + "spans": [ + { + "bbox": [ + 86, + 676, + 531, + 708 + ], + "type": "text", + "content": "[1] Hao Ai, Zidong Cao, Haonan Lu, Chen Chen, Jian Ma, Pengyuan Zhou, Tae-Kyun Kim, Pan Hui, and Lin Wang. Dream360: Diverse and immersive outdoor virtual scene creation via transformer-based 360 image outpainting. IEEE transactions on visualization and computer graphics, 2024. 34, 42" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 87, + 711, + 355, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 711, + 355, + 722 + ], + "spans": [ + { + "bbox": [ + 87, + 711, + 355, + 722 + ], + "type": "text", + "content": "[2] Ideogram AI. Ideogram. https://ideogram.ai/, 2024. 
10, 11, 12" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "85" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 84 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 72, + 533, + 722 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 87, + 72, + 532, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 72, + 532, + 94 + ], + "spans": [ + { + "bbox": [ + 87, + 72, + 532, + 94 + ], + "type": "text", + "content": "[3] Youngmin Baek, Bado Lee, Dongyoon Han, Sangdoo Yun, and Hwalsuk Lee. Character region awareness for text detection. In CVPR, 2019. 78, 79" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 86, + 96, + 533, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 96, + 533, + 118 + ], + "spans": [ + { + "bbox": [ + 86, + 96, + 533, + 118 + ], + "type": "text", + "content": "[4] Jinbin Bai, Wei Chow, Ling Yang, Xiangtai Li, Juncheng Li, Hanwang Zhang, and Shuicheng Yan. Humanedit: A high-quality human-rewarded dataset for instruction-based image editing. arXiv preprint arXiv:2412.04280, 2024. 21" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 87, + 120, + 533, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 120, + 533, + 142 + ], + "spans": [ + { + "bbox": [ + 87, + 120, + 533, + 142 + ], + "type": "text", + "content": "[5] Jinbin Bai, Zhen Dong, Aosong Feng, Xiao Zhang, Tian Ye, Kaicheng Zhou, and Mike Zheng Shou. Integrating view conditions for image synthesis. arXiv preprint arXiv:2310.16002, 2023. 21" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 87, + 144, + 533, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 144, + 533, + 175 + ], + "spans": [ + { + "bbox": [ + 87, + 144, + 533, + 175 + ], + "type": "text", + "content": "[6] Jinbin Bai, Tian Ye, Wei Chow, Enxin Song, Qing-Guo Chen, Xiangtai Li, Zhen Dong, Lei Zhu, and Shuicheng Yan. Meissonic: Revitalizing masked generative transformers for efficient high-resolution text-to-image synthesis. arXiv preprint arXiv:2410.08261, 2024. 5, 85" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 87, + 177, + 488, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 177, + 488, + 190 + ], + "spans": [ + { + "bbox": [ + 87, + 177, + 488, + 190 + ], + "type": "text", + "content": "[7] Shane Barratt and Rishi Sharma. A note on the inception score. arXiv preprint arXiv:1801.01973, 2018. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 87, + 192, + 533, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 192, + 533, + 223 + ], + "spans": [ + { + "bbox": [ + 87, + 192, + 533, + 223 + ], + "type": "text", + "content": "[8] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. Improving image generation with better captions. Computer Science. 
https://cdn.openai.com/papers/dall-e-3.pdf, 2023.5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 87, + 225, + 533, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 225, + 533, + 247 + ], + "spans": [ + { + "bbox": [ + 87, + 225, + 533, + 247 + ], + "type": "text", + "content": "[9] Manuel Brack, Felix Friedrich, Katharina Kornmeier, Linoy Tsaban, Patrick Schramowski, Kristian Kersting, and Apolinário Passos. *Ledits++: Limitless image editing using text-to-image models.* 2023. 21, 25" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 250, + 533, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 250, + 533, + 271 + ], + "spans": [ + { + "bbox": [ + 83, + 250, + 533, + 271 + ], + "type": "text", + "content": "[10] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. arXiv preprint arXiv:2211.09800, 2022. 21" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 274, + 533, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 274, + 533, + 295 + ], + "spans": [ + { + "bbox": [ + 83, + 274, + 533, + 295 + ], + "type": "text", + "content": "[11] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11315-11325, 2022. 85" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 297, + 533, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 297, + 533, + 319 + ], + "spans": [ + { + "bbox": [ + 83, + 297, + 533, + 319 + ], + "type": "text", + "content": "[12] Haoyu Chen, Xiaojie Xu, Wenbo Li, Jingjing Ren, Tian Ye, Songhua Liu, Ying-Cong Chen, Lei Zhu, and Xinchao Wang. Posta: A go-to framework for customized artistic poster generation. arXiv preprint arXiv:2503.14908, 2025. 10, 12" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 321, + 533, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 321, + 533, + 352 + ], + "spans": [ + { + "bbox": [ + 83, + 321, + 533, + 352 + ], + "type": "text", + "content": "[13] Liang Chen, Shuai Bai, Wenhao Chai, Weichu Xie, Haozhe Zhao, Leon Vinci, Junyang Lin, and Baobao Chang. Multimodal representation alignment for image generation: Text-image interleaved control is easier than you think. arXiv preprint arXiv:2502.20172, 2025. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 355, + 533, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 355, + 533, + 376 + ], + "spans": [ + { + "bbox": [ + 83, + 355, + 533, + 376 + ], + "type": "text", + "content": "[14] Liang-Chieh Chen, George Papandreou, Florian Schroff, and Hartwig Adam. Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587, 2017. 62, 64" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 83, + 379, + 533, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 379, + 533, + 410 + ], + "spans": [ + { + "bbox": [ + 83, + 379, + 533, + 410 + ], + "type": "text", + "content": "[15] Sixiang Chen, Tian Ye, Jinbin Bai, Erkang Chen, Jun Shi, and Lei Zhu. Sparse sampling transformer with uncertainty-driven ranking for unified removal of raindrops and rain streaks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13106-13117, 2023. 
34" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 83, + 413, + 533, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 413, + 533, + 434 + ], + "spans": [ + { + "bbox": [ + 83, + 413, + 533, + 434 + ], + "type": "text", + "content": "[16] Sixiang Chen, Tian Ye, Yun Liu, and Erkang Chen. Snowformer: Context interaction transformer with scale-awareness for single image desnowing. arXiv preprint arXiv:2208.09703, 2022. 34" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 83, + 437, + 533, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 437, + 533, + 468 + ], + "spans": [ + { + "bbox": [ + 83, + 437, + 533, + 468 + ], + "type": "text", + "content": "[17] Sixiang Chen, Tian Ye, Kai Zhang, Zhaohu Xing, Yunlong Lin, and Lei Zhu. Teaching tailored to talent: Adverse weather restoration via prompt pool and depth-anything constraint. In European Conference on Computer Vision, pages 95–115. Springer, 2024. 34" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 83, + 471, + 533, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 471, + 533, + 501 + ], + "spans": [ + { + "bbox": [ + 83, + 471, + 533, + 501 + ], + "type": "text", + "content": "[18] Tianqi Chen, Yongfei Liu, Zhendong Wang, Jianbo Yuan, Quanzeng You, Hongxia Yang, and Mingyuan Zhou. Improving in-context learning in diffusion models with visual context-modulated prompts. arXiv preprint arXiv:2312.01408, 2023. 56" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 83, + 504, + 533, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 504, + 533, + 535 + ], + "spans": [ + { + "bbox": [ + 83, + 504, + 533, + 535 + ], + "type": "text", + "content": "[19] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 83, + 538, + 533, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 538, + 533, + 559 + ], + "spans": [ + { + "bbox": [ + 83, + 538, + 533, + 559 + ], + "type": "text", + "content": "[20] Marcos V. Conde, Gregor Geigle, and Radu Timofte. Instructir: High-quality image restoration following human instructions. In ECCV, 2024. 34, 35, 36, 37, 38, 39, 40" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 83, + 562, + 533, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 562, + 533, + 583 + ], + "spans": [ + { + "bbox": [ + 83, + 562, + 533, + 583 + ], + "type": "text", + "content": "[21] Runmin Cong, Yuchen Guan, Jinpeng Chen, Wei Zhang, Yao Zhao, and Sam Kwong. Sddnet: Style-guided dual-layer disentanglement network for shadow detection. In ACM MM, 2023. 69, 72" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 83, + 586, + 533, + 606 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 586, + 533, + 606 + ], + "spans": [ + { + "bbox": [ + 83, + 586, + 533, + 606 + ], + "type": "text", + "content": "[22] Ciprian Corneanu, Raghudeep Gadde, and Aleix M Martinez. Latentpaint: Image inpainting in latent space with diffusion models. In WACV, 2024. 
34, 41" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 83, + 609, + 533, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 609, + 533, + 630 + ], + "spans": [ + { + "bbox": [ + 83, + 609, + 533, + 630 + ], + "type": "text", + "content": "[23] Yingying Deng, Fan Tang, Weiming Dong, Chongyang Ma, Xingjia Pan, Lei Wang, and Changsheng Xu. Stytr2: Image style transfer with transformers. In CVPR, 2022. 18" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 83, + 633, + 533, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 633, + 533, + 664 + ], + "spans": [ + { + "bbox": [ + 83, + 633, + 533, + 664 + ], + "type": "text", + "content": "[24] Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamlmm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499, 2023. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 83, + 667, + 533, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 667, + 533, + 689 + ], + "spans": [ + { + "bbox": [ + 83, + 667, + 533, + 689 + ], + "type": "text", + "content": "[25] Wei Dong, Han Zhou, Yuqiong Tian, Jingke Sun, Xiaohong Liu, Guangtao Zhai, and Jun Chen. Shadowrefiner: Towards mask-free shadow removal via fast fourier transformer. arXiv preprint arXiv:2406.02559. 44" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 83, + 691, + 533, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 691, + 533, + 722 + ], + "spans": [ + { + "bbox": [ + 83, + 691, + 533, + 722 + ], + "type": "text", + "content": "[26] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 1" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "86" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 85 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 72, + 533, + 723 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 83, + 72, + 533, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 72, + 533, + 105 + ], + "spans": [ + { + "bbox": [ + 83, + 72, + 533, + 105 + ], + "type": "text", + "content": "[27] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first international conference on machine learning*, 2024. 10, 11, 47, 51" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 106, + 533, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 106, + 533, + 139 + ], + "spans": [ + { + "bbox": [ + 83, + 106, + 533, + 139 + ], + "type": "text", + "content": "[28] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 12873-12883, 2021. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 141, + 533, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 141, + 533, + 163 + ], + "spans": [ + { + "bbox": [ + 84, + 141, + 533, + 163 + ], + "type": "text", + "content": "[29] Aosong Feng, Weikang Qiu, Jinbin Bai, Kaicheng Zhou, Zhen Dong, Xiao Zhang, Rex Ying, and Leandros Tassiulas. An item is worth a prompt: Versatile image editing with disentangled control. arXiv preprint arXiv:2403.04880, 2024. 21" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 84, + 165, + 533, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 165, + 533, + 186 + ], + "spans": [ + { + "bbox": [ + 84, + 165, + 533, + 186 + ], + "type": "text", + "content": "[30] Tsu-Jui Fu, Wenze Hu, Xianzhi Du, William Yang Wang, Yinfei Yang, and Zhe Gan. Guiding instruction-based image editing via multimodal large language models. ICLR, 2024. 21, 22, 23, 24" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 84, + 189, + 533, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 189, + 533, + 211 + ], + "spans": [ + { + "bbox": [ + 84, + 189, + 533, + 211 + ], + "type": "text", + "content": "[31] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. ICLR, 2023. 28" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 213, + 533, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 213, + 533, + 236 + ], + "spans": [ + { + "bbox": [ + 84, + 213, + 533, + 236 + ], + "type": "text", + "content": "[32] Jun Gao, Tianchang Shen, Zian Wang, Wenzheng Chen, Kangxue Yin, Daiqing Li, Or Litany, Zan Gojcic, and Sanja Fidler. Get3d: A generative model of high quality 3d textured shapes learned from images. NeurIPS, 2022. 58" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 84, + 237, + 533, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 237, + 533, + 258 + ], + "spans": [ + { + "bbox": [ + 84, + 237, + 533, + 258 + ], + "type": "text", + "content": "[33] Leon A Gatys, Alexander S Ecker, and Matthias Bethge. Image style transfer using convolutional neural networks. CVPR, 2016. 18" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 84, + 261, + 533, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 261, + 533, + 293 + ], + "spans": [ + { + "bbox": [ + 84, + 261, + 533, + 293 + ], + "type": "text", + "content": "[34] Yuying Ge, Sijie Zhao, Jinguo Zhu, Yixiao Ge, Kun Yi, Lin Song, Chen Li, Xiaohan Ding, and Ying Shan. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024.1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 84, + 295, + 533, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 295, + 533, + 318 + ], + "spans": [ + { + "bbox": [ + 84, + 295, + 533, + 318 + ], + "type": "text", + "content": "[35] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 63(11):139–144, 2020. 
1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 84, + 319, + 533, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 319, + 533, + 351 + ], + "spans": [ + { + "bbox": [ + 84, + 319, + 533, + 351 + ], + "type": "text", + "content": "[36] Yuchao Gu, Xintao Wang, Jay Zhangjie Wu, Yujun Shi, Yunpeng Chen, Zihan Fan, Wuyou Xiao, Rui Zhao, Shuning Chang, Weijia Wu, et al. Mix-of-show: Decentralized low-rank adaptation for multi-concept customization of diffusion models. In NeurIPS, 2024. 28" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 84, + 354, + 533, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 354, + 533, + 376 + ], + "spans": [ + { + "bbox": [ + 84, + 354, + 533, + 376 + ], + "type": "text", + "content": "[37] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. NeurIPS, 2017. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 84, + 377, + 533, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 377, + 533, + 400 + ], + "spans": [ + { + "bbox": [ + 84, + 377, + 533, + 400 + ], + "type": "text", + "content": "[38] Qibin Hou, Yuying Ge, Jing Zhang, Yuchao Dai, and Ming-Ming Cheng. Storydiffusion: Consistent self-attention for long-range image and video generation. In Advances in Neural Information Processing Systems (NeurIPS), 2024. 31, 32" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 84, + 402, + 533, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 402, + 533, + 424 + ], + "spans": [ + { + "bbox": [ + 84, + 402, + 533, + 424 + ], + "type": "text", + "content": "[39] Qiming Hu, Hainuo Wang, and Xiaojie Guo. Single image reflection separation via dual-stream interactive transformers. Advances in Neural Information Processing Systems, 37:55228-55248, 2024. 45" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 84, + 426, + 533, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 426, + 533, + 449 + ], + "spans": [ + { + "bbox": [ + 84, + 426, + 533, + 449 + ], + "type": "text", + "content": "[40] Jiancheng Huang, Yi Huang, Jianzhuang Liu, Donghao Zhou, Yifan Liu, and Shifeng Chen. Dual-schedule inversion: Training-and tuning-free inversion for real image editing. arXiv preprint arXiv:2412.11152, 2024. 21" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 84, + 449, + 533, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 449, + 533, + 482 + ], + "spans": [ + { + "bbox": [ + 84, + 449, + 533, + 482 + ], + "type": "text", + "content": "[41] Kaiyi Huang, Chengqi Duan, Kaiyue Sun, Enze Xie, Zhenguo Li, and Xihui Liu. T2i-compbench++: An enhanced and comprehensive benchmark for compositional text-to-image generation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2025. 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 84, + 483, + 533, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 483, + 533, + 506 + ], + "spans": [ + { + "bbox": [ + 84, + 483, + 533, + 506 + ], + "type": "text", + "content": "[42] Lianghua Huang, Wei Wang, Zhi-Fan Wu, Yupeng Shi, Huanzhang Dou, Chen Liang, Yutong Feng, Yu Liu, and Jingren Zhou. In-context lora for diffusion transformers. arXiv preprint arXiv:2410.23775, 2024. 
56" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 84, + 508, + 533, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 508, + 533, + 529 + ], + "spans": [ + { + "bbox": [ + 84, + 508, + 533, + 529 + ], + "type": "text", + "content": "[43] Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In ICCV, 2017. 18" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 84, + 532, + 533, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 532, + 533, + 555 + ], + "spans": [ + { + "bbox": [ + 84, + 532, + 533, + 555 + ], + "type": "text", + "content": "[44] Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, and Furu Wei. Layoutlmv3: Pre-training for document ai with unified text and image masking. In ACM MM, 2022. 77" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 84, + 557, + 533, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 557, + 533, + 578 + ], + "spans": [ + { + "bbox": [ + 84, + 557, + 533, + 578 + ], + "type": "text", + "content": "[45] Zixuan Huang, Stefan Stojanov, Anh Thai, Varun Jampani, and James M Rehg. Planes vs. chairs: Category-guided 3d shape learning without any 3d cues. In ECCV, 2022. 58" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 84, + 580, + 533, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 580, + 533, + 602 + ], + "spans": [ + { + "bbox": [ + 84, + 580, + 533, + 602 + ], + "type": "text", + "content": "[46] Jiaxiu Jiang, Yabo Zhang, Kailai Feng, Xiaohe Wu, Wenbo Li, Renjing Pei, Fan Li, and Wangmeng Zuo. Mc2: Multi-concept guidance for customized multi-concept generation. arXiv preprint arXiv:2404.05268, 2024. 28" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 84, + 605, + 533, + 626 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 605, + 533, + 626 + ], + "spans": [ + { + "bbox": [ + 84, + 605, + 533, + 626 + ], + "type": "text", + "content": "[47] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In ECCV, 2016. 18" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 84, + 628, + 533, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 628, + 533, + 661 + ], + "spans": [ + { + "bbox": [ + 84, + 628, + 533, + 661 + ], + "type": "text", + "content": "[48] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 76" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 84, + 662, + 533, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 662, + 533, + 685 + ], + "spans": [ + { + "bbox": [ + 84, + 662, + 533, + 685 + ], + "type": "text", + "content": "[49] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In ICCV, 2023. 
56" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 84, + 687, + 533, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 687, + 533, + 709 + ], + "spans": [ + { + "bbox": [ + 84, + 687, + 533, + 709 + ], + "type": "text", + "content": "[50] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In CVPR, 2023. 28" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 84, + 711, + 533, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 711, + 533, + 723 + ], + "spans": [ + { + "bbox": [ + 84, + 711, + 533, + 723 + ], + "type": "text", + "content": "[51] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024. 2, 5, 8, 9, 10, 11, 47, 48, 49" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "87" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 86 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 72, + 533, + 723 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 83, + 72, + 533, + 104 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 72, + 533, + 104 + ], + "spans": [ + { + "bbox": [ + 83, + 72, + 533, + 104 + ], + "type": "text", + "content": "[52] Bolin Lai, Felix Juefei-Xu, Miao Liu, Xiaoliang Dai, Nikhil Mehta, Chenguang Zhu, Zeyi Huang, James M Rehg, Sang-min Lee, Ning Zhang, et al. Unleashing in-context learning of autoregressive models for few-shot image manipulation. arXiv preprint arXiv:2412.01027, 2024. 56" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 106, + 436, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 106, + 436, + 118 + ], + "spans": [ + { + "bbox": [ + 83, + 106, + 436, + 118 + ], + "type": "text", + "content": "[53] Jiachen Li, Jitesh Jain, and Humphrey Shi. Matting anything. arXiv: 2306.05399, 2023. 66" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 120, + 532, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 120, + 532, + 142 + ], + "spans": [ + { + "bbox": [ + 84, + 120, + 532, + 142 + ], + "type": "text", + "content": "[54] Jiachen Li, Jitesh Jain, and Humphrey Shi. Matting anything. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1775–1785, 2024. 68" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 84, + 144, + 533, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 144, + 533, + 175 + ], + "spans": [ + { + "bbox": [ + 84, + 144, + 533, + 175 + ], + "type": "text", + "content": "[55] Junyi Li, Zhilu Zhang, Xiaoyu Liu, Chaoyu Feng, Xiaotao Wang, Lei Lei, and Wangmeng Zuo. Spatially adaptive self-supervised learning for real-world image denoising. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2023. 
34" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 83, + 177, + 533, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 177, + 533, + 199 + ], + "spans": [ + { + "bbox": [ + 83, + 177, + 533, + 199 + ], + "type": "text", + "content": "[56] Yachuan Li, Xavier Soria Poma, Yun Bai, Qian Xiao, Chaozhi Yang, Guanlin Li, and Zongmin Li. Edmb: Edge detector with mamba. arXiv preprint arXiv:2501.04846, 2025. 66, 67" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 83, + 201, + 533, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 201, + 533, + 222 + ], + "spans": [ + { + "bbox": [ + 83, + 201, + 533, + 222 + ], + "type": "text", + "content": "[57] Yijun Li, Chen Fang, Jimei Yang, Zhaowen Wang, Xin Lu, and Ming-Hsuan Yang. Universal style transfer via feature transforms. In NIPS, 2017. 18" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 83, + 225, + 533, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 225, + 533, + 247 + ], + "spans": [ + { + "bbox": [ + 83, + 225, + 533, + 247 + ], + "type": "text", + "content": "[58] Zijie Li, Henry Li, Yichun Shi, Amir Barati Farimani, Yuval Kluger, Linjie Yang, and Peng Wang. Dual diffusion for unified image generation and understanding. arXiv preprint arXiv:2501.00289, 2024. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 248, + 533, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 248, + 533, + 271 + ], + "spans": [ + { + "bbox": [ + 83, + 248, + 533, + 271 + ], + "type": "text", + "content": "[59] Zhexin Liang, Zhaochen Li, Shangchen Zhou, Chongyi Li, and Chen Change Loy. Control color: Multimodal diffusion-based interactive image colorization. arXiv preprint arXiv:2402.10855, 2024. 34, 43" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 273, + 533, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 273, + 533, + 295 + ], + "spans": [ + { + "bbox": [ + 83, + 273, + 533, + 295 + ], + "type": "text", + "content": "[60] Xin Lin, Chao Ren, Kelvin CK Chan, Lu Qi, Jinshan Pan, and Ming-Hsuan Yang. Multi-task image restoration guided by robust dino features. arXiv preprint arXiv:2312.01677, 2023. 34" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 297, + 533, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 297, + 533, + 319 + ], + "spans": [ + { + "bbox": [ + 83, + 297, + 533, + 319 + ], + "type": "text", + "content": "[61] Xin Lin, Chao Ren, and Xiao Liu. Unsupervised image denoising in real-world scenarios via self-collaboration parallel generative adversarial branches. In ICCV, 2023. 34" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 321, + 533, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 321, + 533, + 351 + ], + "spans": [ + { + "bbox": [ + 83, + 321, + 533, + 351 + ], + "type": "text", + "content": "[62] Xin Lin, Jingtong Yue, Sixian Ding, Chao Ren, Lu Qi, and Ming-Hsuan Yang. Dual degradation representation for joint deraining and low-light enhancement in the dark. IEEE Transactions on Circuits and Systems for Video Technology, 2024. 
34" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 354, + 533, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 354, + 533, + 376 + ], + "spans": [ + { + "bbox": [ + 83, + 354, + 533, + 376 + ], + "type": "text", + "content": "[63] Xin Lin, Yuyan Zhou, Jingtong Yue, Chao Ren, Kelvin CK Chan, Lu Qi, and Ming-Hsuan Yang. Re-boosting self-collaboration parallel prompt gan for unsupervised image restoration. arXiv preprint arXiv:2408.09241, 2024. 34" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 83, + 379, + 533, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 379, + 533, + 410 + ], + "spans": [ + { + "bbox": [ + 83, + 379, + 533, + 410 + ], + "type": "text", + "content": "[64] Bingchen Liu, Ehsan Akhgari, Alexander Visheratin, Aleks Kamko, Linmiao Xu, Shivam Shrirao, Chase Lambert, Joao Souza, Suhail Doshi, and Daiqing Li. Playground v3: Improving text-to-image alignment with deep-fusion large language models. arXiv preprint arXiv:2409.10695, 2024. 10, 12, 14" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 83, + 412, + 533, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 412, + 533, + 434 + ], + "spans": [ + { + "bbox": [ + 83, + 412, + 533, + 434 + ], + "type": "text", + "content": "[65] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining, 2024. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 83, + 436, + 533, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 436, + 533, + 468 + ], + "spans": [ + { + "bbox": [ + 83, + 436, + 533, + 468 + ], + "type": "text", + "content": "[66] Haipeng Liu, Yang Wang, Biao Qian, Meng Wang, and Yong Rui. Structure matters: Tackling the semantic discrepancy in diffusion models for image inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 34, 42" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 83, + 470, + 533, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 470, + 533, + 491 + ], + "spans": [ + { + "bbox": [ + 83, + 470, + 533, + 491 + ], + "type": "text", + "content": "[67] Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 83, + 494, + 533, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 494, + 533, + 524 + ], + "spans": [ + { + "bbox": [ + 83, + 494, + 533, + 524 + ], + "type": "text", + "content": "[68] Minghua Liu, Chao Xu, Haian Jin, Linghao Chen, Mukund Varma T, Zexiang Xu, and Hao Su. One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization. Advances in Neural Information Processing Systems, 2023. 58" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 83, + 527, + 533, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 527, + 533, + 558 + ], + "spans": [ + { + "bbox": [ + 83, + 527, + 533, + 558 + ], + "type": "text", + "content": "[69] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object. In Proceedings of the IEEE/CVF international conference on computer vision, 2023. 
58" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 83, + 561, + 533, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 561, + 533, + 593 + ], + "spans": [ + { + "bbox": [ + 83, + 561, + 533, + 593 + ], + "type": "text", + "content": "[70] Xiaoxiao Long, Yuan-Chen Guo, Cheng Lin, Yuan Liu, Zhiyang Dou, Lingjie Liu, Yuexin Ma, Song-Hai Zhang, Marc Habermann, Christian Theobalt, et al. Wonder3d: Single image to 3d using cross-domain diffusion. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2024. 58" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 83, + 595, + 533, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 595, + 533, + 616 + ], + "spans": [ + { + "bbox": [ + 83, + 595, + 533, + 616 + ], + "type": "text", + "content": "[71] Aaron Lou, Chenlin Meng, and Stefano Ermon. Discrete diffusion modeling by estimating the ratios of the data distribution. arXiv preprint arXiv:2310.16834, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 83, + 619, + 533, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 619, + 533, + 651 + ], + "spans": [ + { + "bbox": [ + 83, + 619, + 533, + 651 + ], + "type": "text", + "content": "[72] Yiyang Ma, Xingchao Liu, Xiaokang Chen, Wen Liu, Chengyue Wu, Zhiyu Wu, Zizheng Pan, Zhenda Xie, Haowei Zhang, Liang Zhao, et al. Janusflow: Harmonizing autoregression and rectified flow for unified multimodal understanding and generation. arXiv preprint arXiv:2411.07975, 2024. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 83, + 653, + 533, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 653, + 533, + 675 + ], + "spans": [ + { + "bbox": [ + 83, + 653, + 533, + 675 + ], + "type": "text", + "content": "[73] Chenlin Meng, Kristy Choi, Jiaming Song, and Stefano Ermon. Concrete score matching: Generalized score matching for discrete data. Advances in Neural Information Processing Systems, 35:34532-34545, 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 83, + 677, + 533, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 677, + 533, + 708 + ], + "spans": [ + { + "bbox": [ + 83, + 677, + 533, + 708 + ], + "type": "text", + "content": "[74] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019. 58" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 83, + 711, + 454, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 711, + 454, + 723 + ], + "spans": [ + { + "bbox": [ + 83, + 711, + 454, + 723 + ], + "type": "text", + "content": "[75] Midjourney. Midjourney. https://www.midjourney.com, 2024. 
2, 6, 7, 18, 19, 20, 59, 60, 61" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "88" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 87 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 72, + 533, + 723 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 83, + 72, + 533, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 72, + 533, + 95 + ], + "spans": [ + { + "bbox": [ + 83, + 72, + 533, + 95 + ], + "type": "text", + "content": "[76] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 2021. 58" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 96, + 533, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 96, + 533, + 129 + ], + "spans": [ + { + "bbox": [ + 83, + 96, + 533, + 129 + ], + "type": "text", + "content": "[77] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2020. 58" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 130, + 458, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 130, + 458, + 142 + ], + "spans": [ + { + "bbox": [ + 84, + 130, + 458, + 142 + ], + "type": "text", + "content": "[78] OpenAI. Addendum to gpt-4o system card: 4o image generation, 2025. Accessed: 2025-04-02. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 84, + 145, + 533, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 145, + 533, + 175 + ], + "spans": [ + { + "bbox": [ + 84, + 145, + 533, + 175 + ], + "type": "text", + "content": "[79] Junyi Pan, Xiaoguang Han, Weikai Chen, Jiapeng Tang, and Kui Jia. Deep mesh reconstruction from single rgb images via topology modification networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019. 58" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 84, + 178, + 533, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 178, + 533, + 210 + ], + "spans": [ + { + "bbox": [ + 84, + 178, + 533, + 210 + ], + "type": "text", + "content": "[80] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3d hands, face, and body from a single image. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019. 58" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 213, + 533, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 213, + 533, + 243 + ], + "spans": [ + { + "bbox": [ + 84, + 213, + 533, + 243 + ], + "type": "text", + "content": "[81] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 
5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 84, + 247, + 533, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 247, + 533, + 278 + ], + "spans": [ + { + "bbox": [ + 84, + 247, + 533, + 278 + ], + "type": "text", + "content": "[82] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: Improving latent diffusion models for high-resolution image synthesis. In The Twelfth International Conference on Learning Representations (ICLR), 2024. 1, 47, 50" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 84, + 281, + 533, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 281, + 533, + 312 + ], + "spans": [ + { + "bbox": [ + 84, + 281, + 533, + 312 + ], + "type": "text", + "content": "[83] Guocheng Qian, Jinjie Mai, Abdullah Hamdi, Jian Ren, Aliaksandr Siarohin, Bing Li, Hsin-Ying Lee, Ivan Skorokhodov, Peter Wonka, Sergey Tulyakov, et al. Magic123: One image to high-quality 3d object generation using both 2d and 3d diffusion priors. arXiv preprint arXiv:2306.17843, 2023. 58" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 84, + 315, + 533, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 315, + 533, + 336 + ], + "spans": [ + { + "bbox": [ + 84, + 315, + 533, + 336 + ], + "type": "text", + "content": "[84] Chu-Jie Qin, Rui-Qi Wu, Zikun Liu, Xin Lin, Chun-Le Guo, Hyun Hee Park, and Chongyi Li. Restore anything with masks: Leveraging mask image modeling for blind all-in-one image restoration. In ECCV, 2024. 34" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 84, + 338, + 533, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 338, + 533, + 360 + ], + "spans": [ + { + "bbox": [ + 84, + 338, + 533, + 360 + ], + "type": "text", + "content": "[85] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 84, + 363, + 533, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 363, + 533, + 393 + ], + "spans": [ + { + "bbox": [ + 84, + 363, + 533, + 393 + ], + "type": "text", + "content": "[86] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Radle, Chloe Rolland, Laura Gustafson, et al. SAM 2: Segment anything in images and videos. *ICLR*, 2025. 80, 81, 82, 83, 84" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 84, + 397, + 533, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 397, + 533, + 418 + ], + "spans": [ + { + "bbox": [ + 84, + 397, + 533, + 418 + ], + "type": "text", + "content": "[87] Jeremy Reizenstein, Roman Shapovalov, Philipp Henzler, Luca Sbordone, Patrick Labatut, and David Novotny. Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In ICCV, 2021. 58" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 84, + 421, + 533, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 421, + 533, + 442 + ], + "spans": [ + { + "bbox": [ + 84, + 421, + 533, + 442 + ], + "type": "text", + "content": "[88] Bin Ren, Yawei Li, Nancy Mehta, and Radu Timofte. The ninth nitire 2024 efficient super-resolution challenge report. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2024. 34" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 84, + 445, + 533, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 445, + 533, + 476 + ], + "spans": [ + { + "bbox": [ + 84, + 445, + 533, + 476 + ], + "type": "text", + "content": "[89] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR), pages 10684-10695, 2022. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 84, + 479, + 533, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 479, + 533, + 510 + ], + "spans": [ + { + "bbox": [ + 84, + 479, + 533, + 510 + ], + "type": "text", + "content": "[90] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, June 2022. 47, 52" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 84, + 513, + 533, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 513, + 533, + 534 + ], + "spans": [ + { + "bbox": [ + 84, + 513, + 533, + 534 + ], + "type": "text", + "content": "[91] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023. 28" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 84, + 537, + 533, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 537, + 533, + 568 + ], + "spans": [ + { + "bbox": [ + 84, + 537, + 533, + 568 + ], + "type": "text", + "content": "[92] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 2022. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 84, + 571, + 533, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 571, + 533, + 602 + ], + "spans": [ + { + "bbox": [ + 84, + 571, + 533, + 602 + ], + "type": "text", + "content": "[93] Subham Sahoo, Marianne Arriola, Yair Schiff, Aaron Gokaslan, Edgar Marroquin, Justin Chiu, Alexander Rush, and Volodymyr Kuleshov. Simple and effective masked diffusion language models. Advances in Neural Information Processing Systems, 37:130136-130184, 2024. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 84, + 605, + 533, + 626 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 605, + 533, + 626 + ], + "spans": [ + { + "bbox": [ + 84, + 605, + 533, + 626 + ], + "type": "text", + "content": "[94] Qingyu Shi, Lu Qi, Jianzong Wu, Jinbin Bai, Jingbo Wang, Yunhai Tong, Xiangtai Li, and Ming-Husan Yang. Relation- booth: Towards relation-aware customized object generation. arXiv preprint arXiv:2410.23280, 2024. 
28" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 84, + 629, + 533, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 629, + 533, + 660 + ], + "spans": [ + { + "bbox": [ + 84, + 629, + 533, + 660 + ], + "type": "text", + "content": "[95] Haoze Sun, Wenbo Li, Jianzhuang Liu, Haoyu Chen, Renjing Pei, Xueyi Zou, Youliang Yan, and Yujiu Yang. Coser: Bridging image and language for cognitive super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 25868-25878, 2024. 34" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 84, + 663, + 533, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 663, + 533, + 685 + ], + "spans": [ + { + "bbox": [ + 84, + 663, + 533, + 685 + ], + "type": "text", + "content": "[96] Quan Sun, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, Yueze Wang, Hongcheng Gao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative pretraining in multimodality. arXiv preprint arXiv:2307.05222, 2023.1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 84, + 687, + 533, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 687, + 533, + 708 + ], + "spans": [ + { + "bbox": [ + 84, + 687, + 533, + 708 + ], + "type": "text", + "content": "[97] Alexander Swerdlow, Mihir Prabhudesai, Siddharth Gandhi, Deepak Pathak, and Katerina Fragkiadaki. Unified multimodal discrete diffusion. arXiv preprint arXiv:2503.20853, 2025. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 84, + 711, + 533, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 711, + 533, + 723 + ], + "spans": [ + { + "bbox": [ + 84, + 711, + 533, + 723 + ], + "type": "text", + "content": "[98] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 1" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "89" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 88 + }, + { + "para_blocks": [ + { + "bbox": [ + 79, + 72, + 533, + 721 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 83, + 72, + 533, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 72, + 533, + 123 + ], + "spans": [ + { + "bbox": [ + 83, + 72, + 533, + 123 + ], + "type": "text", + "content": "[99] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 
2, 3, 5, 6, 7, 8, 9, 10, 12, 14, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 59, 60, 61, 63, 64, 65, 67, 68, 70, 71, 72, 73, 75, 76, 77, 78, 79, 81, 82, 83, 84" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 79, + 127, + 533, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 127, + 533, + 159 + ], + "spans": [ + { + "bbox": [ + 79, + 127, + 533, + 159 + ], + "type": "text", + "content": "[100] Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 79, + 161, + 533, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 161, + 533, + 183 + ], + "spans": [ + { + "bbox": [ + 79, + 161, + 533, + 183 + ], + "type": "text", + "content": "[101] Chunwei Wang, Guansong Lu, Junwei Yang, Runhui Huang, Jianhua Han, Lu Hou, Wei Zhang, and Hang Xu. Illumine: Illuminating your llms to see, draw, and self-enhance. arXiv preprint arXiv:2412.06673, 2024. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 79, + 185, + 533, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 185, + 533, + 207 + ], + "spans": [ + { + "bbox": [ + 79, + 185, + 533, + 207 + ], + "type": "text", + "content": "[102] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In Proceedings of the European conference on computer vision (ECCV), 2018. 58" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 209, + 533, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 209, + 533, + 232 + ], + "spans": [ + { + "bbox": [ + 79, + 209, + 533, + 232 + ], + "type": "text", + "content": "[103] Xierui Wang, Siming Fu, Qihan Huang, Wanggui He, and Hao Jiang. Ms-diffusion: Multi-subject zero-shot image personalization with layout guidance. arXiv preprint arXiv:2406.07209, 2024. 28, 30" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 79, + 234, + 533, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 234, + 533, + 257 + ], + "spans": [ + { + "bbox": [ + 79, + 234, + 533, + 257 + ], + "type": "text", + "content": "[104] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 79, + 259, + 533, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 259, + 533, + 281 + ], + "spans": [ + { + "bbox": [ + 79, + 259, + 533, + 281 + ], + "type": "text", + "content": "[105] Zhendong Wang, Yifan Jiang, Yadong Lu, Pengcheng He, Weizhu Chen, Zhangyang Wang, Mingyuan Zhou, et al. In-context learning unlocked for diffusion models. NeurIPS, 2023. 56" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 79, + 284, + 533, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 284, + 533, + 305 + ], + "spans": [ + { + "bbox": [ + 79, + 284, + 533, + 305 + ], + "type": "text", + "content": "[106] Zhenyu Wang, Aoxue Li, Zhenguo Li, and Xihui Liu. 
Genartist: Multimodal llm as an agent for unified image generation and editing. NeurIPS, 2024. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 79, + 308, + 533, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 308, + 533, + 330 + ], + "spans": [ + { + "bbox": [ + 79, + 308, + 533, + 330 + ], + "type": "text", + "content": "[107] Alex Warren, Ke Xu, Jiaying Lin, Gary KL Tam, and Rynson WH Lau. Effective video mirror detection with inconsistent motion cues. In CVPR, 2024. 69, 71" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 79, + 333, + 533, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 333, + 533, + 355 + ], + "spans": [ + { + "bbox": [ + 79, + 333, + 533, + 355 + ], + "type": "text", + "content": "[108] Jianzong Wu, Chao Tang, Jingbo Wang, Yanhong Zeng, Xiangtai Li, and Yunhai Tong. Diffensei: Bridging multi-modal lms and diffusion models for customized manga generation. CVPR, 2025. 31, 33" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 79, + 357, + 533, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 357, + 533, + 388 + ], + "spans": [ + { + "bbox": [ + 79, + 357, + 533, + 388 + ], + "type": "text", + "content": "[109] Size Wu, Wenwei Zhang, Lumin Xu, Sheng Jin, Zhonghua Wu, Qingyi Tao, Wentao Liu, Wei Li, and Chen Change Loy. Harmonizing visual representations for unified multimodal understanding and generation. arXiv preprint arXiv:2503.21979, 2025. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 79, + 392, + 533, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 392, + 533, + 423 + ], + "spans": [ + { + "bbox": [ + 79, + 392, + 533, + 423 + ], + "type": "text", + "content": "[110] Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. arXiv preprint arXiv:2409.04429, 2024. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 79, + 426, + 533, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 426, + 533, + 448 + ], + "spans": [ + { + "bbox": [ + 79, + 426, + 533, + 448 + ], + "type": "text", + "content": "[111] Yifan Xia, Yuying Ge, Jing Zhang, Yuchao Dai, and Ming-Ming Cheng. Seed-story: Multimodal long story generation with large language model. arXiv preprint arXiv:2407.08683, 2024. 31, 32" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 79, + 450, + 533, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 450, + 533, + 472 + ], + "spans": [ + { + "bbox": [ + 79, + 450, + 533, + 472 + ], + "type": "text", + "content": "[112] Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and Zheng Liu. Omnigen: Unified image generation. arXiv preprint arXiv:2409.11340, 2024. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 79, + 475, + 533, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 475, + 533, + 506 + ], + "spans": [ + { + "bbox": [ + 79, + 475, + 533, + 506 + ], + "type": "text", + "content": "[113] Jiale Xu, Weihao Cheng, Yiming Gao, Xintao Wang, Shenghua Gao, and Ying Shan. Instantmesh: Efficient 3d mesh generation from a single image with sparse-view large reconstruction models. 
arXiv preprint arXiv:2404.07191, 2024.58" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 79, + 509, + 533, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 509, + 533, + 540 + ], + "spans": [ + { + "bbox": [ + 79, + 509, + 533, + 540 + ], + "type": "text", + "content": "[114] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10371-10381, 2024. 74, 75" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 79, + 544, + 533, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 544, + 533, + 566 + ], + "spans": [ + { + "bbox": [ + 79, + 544, + 533, + 566 + ], + "type": "text", + "content": "[115] Ling Yang, Zhaochen Yu, Chenlin Meng, Minkai Xu, Stefano Ermon, and Bin Cui. Mastering text-to-image diffusion: Recaptioning, planning, and generating with multimodal llms. In ICML, 2024. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 79, + 568, + 522, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 568, + 522, + 581 + ], + "spans": [ + { + "bbox": [ + 79, + 568, + 522, + 581 + ], + "type": "text", + "content": "[116] Hang Yu, Ruilin Li, Shaorong Xie, and Jiayan Qiu. Shadow-eligible image outpainting. In CVPR, 2024. 34, 42" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 79, + 583, + 533, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 583, + 533, + 613 + ], + "spans": [ + { + "bbox": [ + 79, + 583, + 533, + 613 + ], + "type": "text", + "content": "[117] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv, 2025. 62, 63" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 79, + 617, + 533, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 617, + 533, + 639 + ], + "spans": [ + { + "bbox": [ + 79, + 617, + 533, + 639 + ], + "type": "text", + "content": "[118] Yu Yuan, Xijun Wang, Yichen Sheng, Prateek Chennuri, Xingguang Zhang, and Stanley Chan. Generative photography: Scene-consistent camera control for realistic text-to-image synthesis. arXiv preprint arXiv:2412.02168, 2024. 53, 54, 55" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 79, + 642, + 533, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 642, + 533, + 673 + ], + "spans": [ + { + "bbox": [ + 79, + 642, + 533, + 673 + ], + "type": "text", + "content": "[119] Cheng Zhang, Qianyi Wu, Camilo Cruz Gambardella, Xiaoshui Huang, Dinh Phung, Wanli Ouyang, and Jianfei Cai. Taming stable diffusion for text to " + }, + { + "bbox": [ + 79, + 642, + 533, + 673 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 79, + 642, + 533, + 673 + ], + "type": "text", + "content": " panorama image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 17" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 79, + 676, + 533, + 698 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 676, + 533, + 698 + ], + "spans": [ + { + "bbox": [ + 79, + 676, + 533, + 698 + ], + "type": "text", + "content": "[120] Kai Zhang, Lingbo Mo, Wenhu Chen, Huan Sun, and Yu Su. 
Magicbrush: A manually annotated dataset for instruction-guided image editing. In NeurIPS, 2023. 21, 25, 26, 27" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 79, + 700, + 533, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 700, + 533, + 721 + ], + "spans": [ + { + "bbox": [ + 79, + 700, + 533, + 721 + ], + "type": "text", + "content": "[121] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models, 2023. 47" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "90" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 89 + }, + { + "para_blocks": [ + { + "bbox": [ + 77, + 72, + 534, + 339 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 78, + 72, + 533, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 533, + 94 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 533, + 94 + ], + "type": "text", + "content": "[122] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Scaling in-the-wild training for diffusion-based illumination harmonization and editing by imposing consistent light transport. In ICLR, 2025. 34, 46" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 96, + 534, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 96, + 534, + 118 + ], + "spans": [ + { + "bbox": [ + 77, + 96, + 534, + 118 + ], + "type": "text", + "content": "[123] Wenwei Zhang, Jiangmiao Pang, Kai Chen, and Chen Change Loy. K-net: Towards unified image segmentation. Advances in Neural Information Processing Systems, 34:10326-10338, 2021. 62, 65" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 78, + 121, + 533, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 121, + 533, + 152 + ], + "spans": [ + { + "bbox": [ + 78, + 121, + 533, + 152 + ], + "type": "text", + "content": "[124] Xinchen Zhang, Ling Yang, Guohao Li, Yaqi Cai, Jiake Xie, Yong Tang, Yujiu Yang, Mengdi Wang, and Bin Cui. Itercomp: Iterative composition-aware feedback learning from model gallery for text-to-image generation. arXiv preprint arXiv:2410.07171, 2024.5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 154, + 533, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 154, + 533, + 176 + ], + "spans": [ + { + "bbox": [ + 78, + 154, + 533, + 176 + ], + "type": "text", + "content": "[125] Yuxuan Zhang, Yiren Song, Jiaming Liu, Rui Wang, Jinpeng Yu, Hao Tang, Huaxia Li, Xu Tang, Yao Hu, Han Pan, et al. Ssr-encoder: Encoding selective subject representation for subject-driven generation. In CVPR, 2024. 28" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 178, + 533, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 178, + 533, + 200 + ], + "spans": [ + { + "bbox": [ + 78, + 178, + 533, + 200 + ], + "type": "text", + "content": "[126] Chuyang Zhao, Yuxing Song, Wenhao Wang, Haocheng Feng, Errui Ding, Yifan Sun, Xinyan Xiao, and Jingdong Wang. Monofrformer: One transformer for both diffusion and autoregression. arXiv preprint arXiv:2409.16280, 2024. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 78, + 202, + 533, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 202, + 533, + 224 + ], + "spans": [ + { + "bbox": [ + 78, + 202, + 533, + 224 + ], + "type": "text", + "content": "[127] Peng Zheng, Dehong Gao, Deng-Ping Fan, Li Liu, Jorma Laaksonen, Wanli Ouyang, and Nicu Sebe. Bilateral reference for high-resolution dichotomous image segmentation. CCAI, 2024. 69, 70, 73" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 78, + 226, + 533, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 226, + 533, + 257 + ], + "spans": [ + { + "bbox": [ + 78, + 226, + 533, + 257 + ], + "type": "text", + "content": "[128] Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 78, + 260, + 533, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 260, + 533, + 290 + ], + "spans": [ + { + "bbox": [ + 78, + 260, + 533, + 290 + ], + "type": "text", + "content": "[129] Donghao Zhou, Jiancheng Huang, Jinbin Bai, Jiaze Wang, Hao Chen, Guangyong Chen, Xiaowei Hu, and Pheng-Ann Heng. MagicTailor: Component-controllable personalization in text-to-image diffusion models. arXiv preprint arXiv:2410.13370, 2024. 28" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 78, + 293, + 533, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 293, + 533, + 316 + ], + "spans": [ + { + "bbox": [ + 78, + 293, + 533, + 316 + ], + "type": "text", + "content": "[130] Zhiyu Zhu, Yingcong Chen, Zhenyu Xie, and Jingyi Yu. Disenvisioner: Disentangled and enriched visual prompt for customized image generation. arXiv preprint arXiv:2410.02067, 2024. 28, 29" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 78, + 318, + 533, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 318, + 533, + 339 + ], + "spans": [ + { + "bbox": [ + 78, + 318, + 533, + 339 + ], + "type": "text", + "content": "[131] Silvia Zuffi, Angjoo Kanazawa, and Michael J Black. Lions and tigers and bears: Capturing non-rigid, 3d, articulated shape from images. In CVPR, 2018. 
58" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "91" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 90 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_content_list.json b/data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..994dfacf7dc2a8f339e32e3b2c7f0972f82dccf5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_content_list.json @@ -0,0 +1,3286 @@ +[ + { + "type": "text", + "text": "Bifrost: Reinventing WiFi Signals Based on Dispersion Effect for Accurate Indoor Localization", + "text_level": 1, + "bbox": [ + 176, + 94, + 818, + 142 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yimiao Sun, Yuan He*, Jiacheng Zhang, Xin Na, Yande Chen, Weiguo Wang, Xiuzhen Guo", + "bbox": [ + 137, + 157, + 857, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "School of Software and BNrist, Tsinghua University", + "bbox": [ + 316, + 175, + 679, + 190 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "sym21@mails.tsinghua.edu.cn, heyuan@tsinghua.edu.cn", + "bbox": [ + 308, + 191, + 689, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zhangjc21,nx20,cyd22,wwg18}@mails.tsinghua.edu.cn, guoxiuzhen94@gmail.com", + "bbox": [ + 222, + 205, + 774, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 83, + 229, + 187, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "WiFi-based device localization is a key enabling technology for smart applications, which has attracted numerous research studies in the past decade. Most of the existing approaches rely on Line-of-Sight (LoS) signals to work, while a critical problem is often neglected: In the real-world indoor environments, WiFi signals are everywhere, but very few of them are usable for accurate localization. As a result, the localization accuracy in practice is far from being satisfactory. This paper presents Bifrost, a novel hardware-software co-design for accurate indoor localization. The core idea of Bifrost is to reinvent WiFi signals, so as to provide sufficient LoS signals for localization. This is realized by exploiting the dispersion effect of signals emitted by the leaky wave antenna (LWA). We present a low-cost plug-in design of LWA that can generate orthogonal polarized signals: On one hand, LWA disperses signals of different frequencies to different angles, thus providing Angle-of-Arrival (AoA) information for the localized target. On the other hand, the target further leverages the antenna polarization mismatch to distinguish AoAs from different LWAs. In the software layer, fine-grained information in Channel State Information (CSI) is exploited to cope with multipath and noise. We implement Bifrost and evaluate its performance under various settings. The results show that the median localization error of Bifrost is $0.81\\mathrm{m}$ , which is $52.35\\%$ less than that of SpotFi, a state-of-the-art approach. 
SpotFi, when combined with Bifrost to work in the realistic settings, can reduce the localization error by $33.54\\%$ .", + "bbox": [ + 81, + 248, + 483, + 593 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS CONCEPTS", + "text_level": 1, + "bbox": [ + 83, + 607, + 218, + 619 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Networks $\\rightarrow$ Location based services; $\\cdot$ Information systems $\\rightarrow$ Location based services;", + "bbox": [ + 83, + 627, + 478, + 654 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "KEYWORDS", + "text_level": 1, + "bbox": [ + 83, + 667, + 196, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "WiFi Localization, Indoor Localization, Leaky Wave Antenna, RF Computing", + "bbox": [ + 81, + 686, + 480, + 714 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{\\dagger}$ Yuan He is the corresponding author.", + "bbox": [ + 83, + 729, + 264, + 743 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/908672f03a05015deaed74a1844456cfe761d014f09263752aad0d47f951d0ba.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 89, + 782, + 210, + 816 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This work is licensed under a Creative Commons Attribution International 4.0 License.", + "bbox": [ + 84, + 819, + 480, + 843 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SenSys'23,November 12-17,2023,Istanbul,Turkiye", + "bbox": [ + 84, + 844, + 323, + 854 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2023 Copyright held by the owner/author(s).", + "bbox": [ + 84, + 854, + 313, + 864 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM ISBN 979-8-4007-0414-7/23/11...$15.00", + "bbox": [ + 84, + 864, + 287, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://doi.org/10.1145/3625687.3625786", + "bbox": [ + 84, + 875, + 292, + 885 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5288f746a3d9c59551d74b46534fb59e7bfd78a7e34567f04627a892543b58df.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 555, + 227, + 867, + 362 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3ff4fad4b6922ac9b775808d80c2e52e64e0bdd0222238fb46302e9792d486a7.jpg", + "image_caption": [ + "(b)", + "Figure 1: A model-driven method works well when (a) sufficient LoS signals are available but becomes inaccurate when (b) NLoS signals have to be used." + ], + "image_footnote": [], + "bbox": [ + 552, + 387, + 841, + 522 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM Reference Format:", + "text_level": 1, + "bbox": [ + 514, + 601, + 661, + 612 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yimiao Sun, Yuan He, Jiacheng Zhang, Xin Na, Yande Chen, Weiguo Wang, Xiuzhen Guo. 2023. Bifrost: Reinventing WiFi Signals Based on Dispersion Effect for Accurate Indoor Localization. In The 21st ACM Conference on Embedded Networked Sensor Systems (SenSys '23), November 12-17, 2023, Istanbul, Türkiye. ACM, New York, NY, USA, 14 pages. 
https://doi.org/10.1145/3625687.3625786", + "bbox": [ + 513, + 613, + 915, + 688 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 514, + 700, + 697, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Location information [32, 63, 79, 80] is crucial, especially for smart indoor applications [50, 60, 67, 72], such as smart home [54, 62], indoor navigation [7, 18, 19, 59] and so on. Due to the ubiquitous deployment of WiFi access points (APs) and wide availability of WiFi modules on the devices, WiFi-based localization [16, 25, 49, 56, 57, 61, 64, 65, 68-70, 73, 74, 82] appears to be promising for indoor localization. The existing works of WiFi-based indoor localization can be broadly grouped into two categories, data-driven methods and model-driven methods.", + "bbox": [ + 511, + 719, + 913, + 843 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Data-driven methods are typically represented by fingerprint [14, 44, 61]. These methods need to collect Received Signal Strength (RSS) or CSI at different places to construct a database mapping RSS", + "bbox": [ + 513, + 844, + 913, + 886 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "376", + "bbox": [ + 485, + 922, + 514, + 934 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/dd3fffaab64c2aa57f33b3095c34259cf6579acc3062073b260c8ec1623d4fbf.jpg", + "image_caption": [ + "(a) Library (48 rooms)" + ], + "image_footnote": [], + "bbox": [ + 122, + 102, + 263, + 222 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e49e4eeeefff0ee45a136194aac6af3b509d72117db7264bff2f5e731f512942.jpg", + "image_caption": [ + "(b) Office (54 rooms)", + "Figure 2: The number of LoS APs in each room in a library and an office building." + ], + "image_footnote": [], + "bbox": [ + 300, + 102, + 442, + 223 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "(or CSI) with locations, which is a labor-intensive process. Also, their performance may be vulnerable to dynamic environments.", + "bbox": [ + 81, + 287, + 482, + 316 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Model-driven methods induce less labor cost and attract more research studies. Generally, a model-driven method calculates the location by estimating signals' Angle-of-Arrival (AoA) [2, 23, 24, 69], Time-of-Flight (ToF) [70, 81] or both [9, 16, 43]. Most of the existing approaches rely on Line-of-Sight (LoS) signals to work, as Fig. 1(a) illustrates, while a critical problem is often neglected: In the real-world indoor environments, WiFi signals are everywhere, but very few of them are usable for accurate localization. As an example to validate this finding, Fig. 2 plots the statistics of the real deployment of WiFi APs in a library (48 rooms) and an office building (54 rooms). The data shows that in nearly half of all the rooms, there is not even one LoS AP available. The rooms with sufficient LoS signals account for less than $5\\%$ of all the rooms. In other words, the chance for a WiFi device to receive sufficient LoS WiFi AP signals, namely the case for it to be accurately localized by using an existing approach, is less than $5\\%$ . 
That well explains why the practical performance of using the existing localization approaches is far from being satisfactory.", + "bbox": [ + 81, + 316, + 482, + 564 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A straightforward idea to address the above problem is to increase the number of deployed WiFi APs, until everywhere is covered by at least 3 LoS APs. It isn't practical, however. Taking the library and office building investigated in Fig. 2 as an example, typically there are 50 rooms in a building. Covering every room with 3 APs requires 150 APs to be deployed, which means multiple drawbacks, such as substantial deployment cost of cables (connecting the APs), overly crowded wireless spectrum, and frequent interference and collisions in the wireless communication.", + "bbox": [ + 81, + 565, + 482, + 686 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This paper presents a novel approach called Bifrost, a plug-and-play and cost-effective scheme to significantly enhance the availability of LoS WiFi signals and in turn the localization accuracy. In light of the research progress on leaky wave antenna (LWA) in recent years [21, 22, 42, 47, 48, 76], Bifrost exploits dispersion effect of wireless signals [33]. Deployed in the space covered by WiFi signals, a LWA can receive those signals and then radiate them at different frequencies towards different directions, exhibiting frequency and spatial division multiplexing (FSDM) features, as is reinventing2 WiFi signals.", + "bbox": [ + 81, + 688, + 482, + 828 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/823419a8c889392fc2b02cd1e4c1b440b94537a0158211bb0cbf7bc569ae4ec1.jpg", + "image_caption": [ + "Figure 3: The high-level principle of Bifrost." + ], + "image_footnote": [], + "bbox": [ + 535, + 103, + 890, + 368 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Fig. 3 illustrates the high-level principle of Bifrost. To localize a target device, Bifrost uses two LWAs to transform WiFi signals into FSDM signals, so the target device will receive two LoS FSDM signals with a unique pair of frequencies. Since the frequency and the propagation direction of FSDM signals are coupled, the target device can estimate its AoAs to both LWAs by analyzing the received spectrum and then calculate its location.", + "bbox": [ + 513, + 424, + 913, + 520 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Compared with using WiFi APs, using LWA to assist localization offers the following two distinct advantages:", + "bbox": [ + 514, + 521, + 913, + 550 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Cost-effective. The cost of a LWA in Bifrost is 7.41 USD (4.36 USD for the material cost and 3.05 USD for the control module), which is significantly lower than that of a WiFi AP (typically $30 \\sim 100$ USD [3-6]).", + "2) Easy to Use. Deploying a LWA is very convenient. It can operate in a plug-and-play manner without the need for connecting power cables." + ], + "bbox": [ + 516, + 551, + 913, + 652 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Leveraging these two advantages, Bifrost can be easily implemented in any environment with WiFi coverage, no matter whether the WiFi signals are LoS or not. 
Bifrost can either work independently, or cooperatively with other conventional WiFi-based localization methods.", + "bbox": [ + 513, + 654, + 913, + 720 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The design of Bifrost tackles several critical challenges, which are summarized as follows:", + "bbox": [ + 513, + 720, + 913, + 748 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Ambiguity between Different LWAs. As Fig. 3 shows, a target device may receive signals from two LWAs, which are reinvented from the same WiFi signal source. Without a special design, it is almost impossible for the target to distinguish one LWA from the other. To overcome this problem, the LWAs in Bifrost are designed to generate orthogonal circular polarized (CP) signals, so that they won't mix up with each other (§3.1). Polarization of LWA signals can be conveniently switched by altering the input port of WiFi signals, without the need for reconstruction or modifications to the LWA's structure.", + "bbox": [ + 511, + 752, + 913, + 890 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", + "bbox": [ + 83, + 71, + 401, + 85 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Yimiao Sun, et al.", + "bbox": [ + 803, + 71, + 913, + 85 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1 In Norse mythology, Bifrost is a rainbow bridge that reaches between Midgard (Earth) and Asgard (the realm of gods). \n2 The word \"reinventing\" means that Bifrost makes WiFi signals look different from their original form by using the LWAs. The signal emitted by the LWAs has two new properties, dispersion effect and circular polarization.", + "bbox": [ + 81, + 839, + 482, + 892 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "377", + "bbox": [ + 485, + 922, + 514, + 934 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2109f2078048336a2c4a81a265742db1ac720ab5b8a945f966c7e74d9179e936.jpg", + "image_caption": [ + "(a) Linear polarization (LP)." + ], + "image_footnote": [], + "bbox": [ + 83, + 103, + 277, + 208 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5bb17e241be2f79ada098e07caca36cc7af2ffba3ddb0c74fca93996c1870e12.jpg", + "image_caption": [ + "(b) Circular polarization (CP)." + ], + "image_footnote": [], + "bbox": [ + 282, + 103, + 506, + 210 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/bbed68b7c7c2a6dffa3e458e459a9cd13deffd3c1a4950f2c36f70d280dc1402.jpg", + "image_caption": [ + "Figure 4: The properties of polarized electromagnetic waves." + ], + "image_footnote": [], + "bbox": [ + 513, + 103, + 728, + 215 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/de065f1018fd63684ca76984649d3bbde8bb74dc9f72d0dd5cb8341546d23646.jpg", + "image_caption": [ + "(c) Elliptical polarization (EP).", + "(d) CP signal synthesis." + ], + "image_footnote": [], + "bbox": [ + 730, + 103, + 890, + 215 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Signal Extraction from the Interfered Frequency Band. Since FSDM signals radiated by LWAs are transformed from existing WiFi signals, the two types of signals operate within the same frequency band and can be simultaneously received by a target device. Directly using such signals leads to erroneous AoA estimation. To deal with such interference, LWAs in Bifrost work in a duty-cycled manner. 
The target device is able to detect distinctive variation of the signal amplitude at the frequencies of FSDM signals (§3.3). By analyzing WiFi CSI, the target device can effectively extract the desired FSDM signals from the interfered frequency band.", + "bbox": [ + 81, + 281, + 483, + 419 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Indoor Multipath Effect. The multipath effect in the indoor environment may seriously affect the quality of the received FSDM signals and further affect the localization accuracy. In order to identify FSDM signals propagating along the LoS path, Bifrost operates in two steps. First, we map frequencies of FSDM signals with subcarriers in CSI and cluster adjacent subcarriers to only retain the cluster with the highest energy (§3.4). Second, we take the intersection of two clusters (corresponding to the two orthogonal CP signals), and determine the final frequency by weighting the center frequency of the remaining clustered subcarriers (§3.5).", + "bbox": [ + 81, + 421, + 483, + 561 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our contributions can be summarized as follows:", + "bbox": [ + 98, + 561, + 398, + 575 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) We tackle a significant problem, namely the limited availability of LoS signals, which is overlooked by the existing works on WiFi-based indoor localization. We reinvent WiFi signals by exploiting the dispersion effect, which represents a new direction of utilizing LWAs.", + "2) We address a series of non-trivial challenges, such as signal ambiguity, interference, and multipath effect, etc. The design of Bifrost effectively ensures the quality of signals used for localization.", + "3) We implement Bifrost and evaluate its performance under various settings. The results show that the median localization error of Bifrost is $0.81\\mathrm{m}$ , which is $52.35\\%$ less than that of SpotFi, a state-of-the-art approach. SpotFi, when combined with Bifrost to work in the realistic settings, can reduce the localization error by $33.54\\%$ ." + ], + "bbox": [ + 86, + 578, + 482, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This paper proceeds as follows: §2 introduces background knowledge on the signal polarization and the LWA. Then §3 unfolds the design of Bifrost in both hardware and software. The implementation and evaluation results are presented in §4. We discuss practical issues in §5 and summarize related works in §6. This work is concluded in §7.", + "bbox": [ + 81, + 794, + 483, + 876 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 PRIMER", + "text_level": 1, + "bbox": [ + 514, + 280, + 619, + 292 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section introduces preliminary knowledge of our work: polarization of wireless signals and leaky wave antenna.", + "bbox": [ + 513, + 297, + 913, + 325 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Signal Polarization", + "text_level": 1, + "bbox": [ + 514, + 339, + 710, + 354 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Polarization is a fundamental property of wireless signals, including FSDM and WiFi signals investigated in this work. 
It represents the direction of the signal's electric field, which can be denoted as $\\vec{E}$", "bbox": [ 513, 357, 911, 398 ], "page_idx": 2 }, { "type": "text", "text": "and can be decomposed into the horizontal component $\\overrightarrow{E_x}$ and the vertical component $\\overrightarrow{E_y}$ . There will be a phase difference $\\Delta \\phi \\in (-\\pi, \\pi]$ between these two orthogonal components, leading to the following elliptic equation", "bbox": [ 513, 402, 910, 460 ], "page_idx": 2 }, { "type": "equation", "text": "\n$$\n\\left( \\frac {\\overrightarrow {E _ {x}}}{E _ {x 0}} \\right) ^ {2} + \\left( \\frac {\\overrightarrow {E _ {y}}}{E _ {y 0}} \\right) ^ {2} - \\frac {2 \\overrightarrow {E _ {x}} \\overrightarrow {E _ {y}}}{E _ {x 0} E _ {y 0}} \\cos (\\Delta \\phi) = \\sin ^ {2} (\\Delta \\phi), \\tag {1}\n$$\n", "text_format": "latex", "bbox": [ 550, 467, 870, 505 ], "page_idx": 2 }, { "type": "text", "text": "where $E_{x0}$ and $E_{y0}$ are amplitudes of $\\overrightarrow{E_x}$ and $\\overrightarrow{E_y}$ . According to the", "bbox": [ 513, 503, 908, 532 ], "page_idx": 2 }, { "type": "text", "text": "value of $\\Delta \\phi$ , the polarization of $\\overrightarrow{E}$ can be divided into the following three categories:", "bbox": [ 513, 532, 908, 561 ], "page_idx": 2 }, { "type": "text", "text": "When $\\Delta \\phi = 0$ or $\\pi$ : we have $\\overrightarrow{E_y} = \\frac{E_{y0}}{\\pm E_{x0}}\\overrightarrow{E_x}$ , so the signal is linear polarized (LP), as shown in Fig. 4(a). The polarization direction hinges on $\\pm \\frac{E_{y0}}{E_{x0}}$ , the ratio of $\\overrightarrow{E_x}$ and $\\overrightarrow{E_y}$ .", "bbox": [ 513, 565, 911, 619 ], "page_idx": 2 }, { "type": "text", "text": "When $\\Delta \\phi = \\pm \\frac{\\pi}{2}$ : we have $\\overrightarrow{E_x}^2 + \\overrightarrow{E_y}^2 = E_{x0}^2$ with $E_{x0} = E_{y0}$ , and now the signal is circular polarized (CP), as Fig. 4(b) illustrates. Besides, Fig. 4(d) provides another perspective on how the CP signal is decomposed into two LP signals. Depending on whether $\\Delta \\phi$ is positive or negative, the rotation direction of the CP signal is in either left-hand circular polarization (LHCP) or right-hand circular polarization (RHCP), which are orthogonal and won't interfere with each other.", "bbox": [ 513, 621, 913, 720 ], "page_idx": 2 }, { "type": "text", "text": "When $\\Delta \\phi$ is Other Values: the signal is elliptical polarized (EP), as Fig. 4(c) depicts. Similar to the CP signal, the EP signal can also be divided into left-hand or right-hand.", "bbox": [ 513, 724, 913, 767 ], "page_idx": 2 }, { "type": "text", "text": "Impact of Polarization on the Rx: The polarization of a signal accords with that of its transmitting antenna but may change during propagation. To ensure effective reception, it should match the polarization of the receiving antenna, at least partially. Fig. 5 illustrates how polarization mismatch affects the received signal strength (RSS).", "bbox": [ 513, 768, 911, 852 ], "page_idx": 2 }, { "type": "text", "text": "For the LP signal and antenna, RSS decreases as the angle between these two polarization directions increases from $0^{\\circ}$ to $90^{\\circ}$ . 
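As a quick numerical sanity check of Eq. (1) and the three $\\Delta \\phi$ regimes above, the following illustrative Python sketch (not part of the original paper; the classifier, its tolerance, and the handedness sign convention are our own assumptions) samples the two field components over one period, verifies the elliptic identity, and labels the polarization:

```python
import numpy as np

def classify_polarization(E_x0, E_y0, dphi, tol=1e-6):
    # Reduce the phase difference to (-pi, pi].
    d = (dphi + np.pi) % (2 * np.pi) - np.pi
    if abs(d) < tol or abs(abs(d) - np.pi) < tol:
        return 'linear'  # Eq. (1) degenerates to a straight line
    if abs(abs(d) - np.pi / 2) < tol and abs(E_x0 - E_y0) < tol:
        # Which sign maps to which handedness is an assumed convention.
        return 'left-hand circular' if d > 0 else 'right-hand circular'
    return 'elliptical'

# Sample E_x, E_y over one period and verify that Eq. (1) holds.
t = np.linspace(0, 2 * np.pi, 1000)
E_x0, E_y0, dphi = 1.0, 1.0, np.pi / 2
E_x, E_y = E_x0 * np.cos(t), E_y0 * np.cos(t + dphi)
lhs = ((E_x / E_x0) ** 2 + (E_y / E_y0) ** 2
       - 2 * E_x * E_y / (E_x0 * E_y0) * np.cos(dphi))
assert np.allclose(lhs, np.sin(dphi) ** 2)
print(classify_polarization(E_x0, E_y0, dphi))
```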
For the CP signal, the signal can be decomposed into two orthogonal LP", "bbox": [ 513, 852, 911, 893 ], "page_idx": 2 }, { "type": "header", "text": "Bifrost", "bbox": [ 84, 71, 138, 85 ], "page_idx": 2 }, { "type": "header", "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", "bbox": [ 593, 71, 913, 87 ], "page_idx": 2 }, { "type": "page_number", "text": "378", "bbox": [ 485, 922, 514, 934 ], "page_idx": 2 }, { "type": "image", "img_path": "images/9c50c21f9f2753fbea75feba6489560ab1bb6bd4e497c4e119371cab5de559c0.jpg", "image_caption": [ "Figure 5: RSS variation according to the polarization of signals and Rx." ], "image_footnote": [], "bbox": [ 84, 103, 482, 351 ], "page_idx": 3 }, { "type": "text", "text": "signals. Thus, the LP antenna can only receive the component whose polarization direction is parallel to itself but loses half of the signal energy. Similarly, the CP antenna can only receive half of the LP signal's energy. However, when an LHCP antenna is used to receive RHCP signals or vice versa, RSS is theoretically zero because these two polarizations are orthogonal. That is the reason why Bifrost can eliminate the ambiguity of two FSDM signals radiated from different LWAs.", "bbox": [ 81, 414, 482, 523 ], "page_idx": 3 }, { "type": "text", "text": "2.2 Leaky Wave Antenna", "text_level": 1, "bbox": [ 83, 537, 302, 553 ], "page_idx": 3 }, { "type": "text", "text": "LWA belongs to the class of traveling-wave antennas, where the propagating wave inside the antenna structure can \"leak\" (i.e., radiate) from the waveguide to the free space, hence the name. It can distinctively couple the leaky wave's frequency and radiation direction to produce a frequency and spatial division multiplexing (FSDM) signal, as shown in Fig. 6. Specifically, direction of the", "bbox": [ 81, 556, 483, 640 ], "page_idx": 3 }, { "type": "text", "text": "signal $\\overrightarrow{E_f}$ with frequency $f$ can be determined by [71]:", "bbox": [ 83, 645, 411, 672 ], "page_idx": 3 }, { "type": "equation", "text": "\n$$\n\\theta (f) = \\arccos \\left( \\frac {\\beta (f)}{k _ {0} (f)} \\right), \\tag {2}\n$$\n", "text_format": "latex", "bbox": [ 209, 672, 480, 691 ], "page_idx": 3 }, { "type": "text", "text": "where $\\beta (f)$ and $k_{0}(f)$ are the phase constant along the LWA and the propagation constant in the free space w.r.t. $E_{f}$ [52].", "bbox": [ 81, 699, 482, 728 ], "page_idx": 3 }, { "type": "text", "text": "Currently, two main types of LWAs have been extensively studied. 1) The uniform LWA, which employs a metallic waveguide with a slit cut along its length [21, 22, 42, 76], as depicted in Fig. 6(b). The FSDM signal leaked from a uniform LWA can only propagate towards the forward region (i.e., $\\theta \\in (0^{\\circ}, 90^{\\circ})$ ). 2) The periodic LWA, which is typically designed using a dielectric substrate with a periodic array of metal strips (i.e., slots) [10-13] and similar to an antenna array, as shown in Fig. 6(a). 
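To make the frequency-to-angle coupling in Eq. (2) concrete, here is a minimal Python sketch; the n = -1 space-harmonic dispersion model, the permittivity, and the period below are illustrative assumptions, not the fabricated CPLWA design:

```python
import numpy as np

C = 3e8  # speed of light (m/s)

def beam_angle_deg(f, eps_r=3.02, period=0.022):
    # Toy phase constant for a periodic LWA, n = -1 space harmonic:
    # beta(f) = 2*pi*f*sqrt(eps_r)/c - 2*pi/period (an assumed model).
    k0 = 2 * np.pi * f / C
    beta = 2 * np.pi * f * np.sqrt(eps_r) / C - 2 * np.pi / period
    ratio = beta / k0
    if abs(ratio) > 1:
        return None  # no leakage: the wave is not fast at this frequency
    return float(np.degrees(np.arccos(ratio)))  # Eq. (2)

for f in np.linspace(5.17e9, 5.33e9, 5):
    theta = beam_angle_deg(f)
    print(f'{f / 1e9:.2f} GHz -> {theta:.1f} deg')
```

With these toy parameters the beam lands in the backward region and sweeps roughly nine degrees across the band; the fabricated CPLWA's actual mapping comes from its measured or simulated dispersion curve.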
The FSDM signal of this type of LWA can propagate towards both forward and backward regions (i.e., $\\theta \\in (0^{\\circ}, 180^{\\circ})$ ) [33].", "bbox": [ 81, 728, 482, 866 ], "page_idx": 3 }, { "type": "text", "text": "Periodic LWA has been widely studied in recent research due to its versatile slot design and low-cost fabrication using the printed", "bbox": [ 81, 866, 478, 893 ], "page_idx": 3 }, { "type": "image", "img_path": "images/36b40804dcad7a772c81b3692c34777cfdd86cb7e5c8e87d5c00d280bc55e7a9.jpg", "image_caption": [ "Figure 6: Typical structures of leaky wave antenna3." ], "image_footnote": [], "bbox": [ 519, 103, 911, 229 ], "page_idx": 3 }, { "type": "text", "text": "circuit board (PCB) process. These attributes have made it a popular choice in various applications. Bifrost also employs the periodic structure to produce circular polarized signals.", "bbox": [ 513, 273, 913, 315 ], "page_idx": 3 }, { "type": "text", "text": "3 BIFROST", "text_level": 1, "bbox": [ 514, 327, 625, 340 ], "page_idx": 3 }, { "type": "text", "text": "In this section, we first articulate how to design the circular polarized LWA (i.e., CPLWA) to transform the input LP signal into the CP signal with the FSDM feature. Then, we present details of our approach to localization with the CPLWA.", "bbox": [ 513, 344, 913, 401 ], "page_idx": 3 }, { "type": "text", "text": "3.1 CPLWA Design", "text_level": 1, "bbox": [ 514, 412, 683, 430 ], "page_idx": 3 }, { "type": "text", "text": "Unlike many traditional LWAs [10, 13, 21, 22], Bifrost utilizes $\\mathrm{CP^4}$ (i.e., RHCP and LHCP) to distinguish different LWAs and corresponding FSDM signals. We specially design a CPLWA that can generate both LHCP and RHCP signals. As shown in Fig. 7(a), our CPLWA has both vertical and horizontal slots to generate orthogonal LP signals, and further to form the CP signal (the bifurcation is designed for performance optimization). According to Eq. (1), a $\\frac{\\pi}{2}$ phase difference between two LP signals is necessary to generate the CP signal, and this is achieved by adjusting the length of the slots. Denoting the guided wavelength of the substrate material at $5.25\\mathrm{GHz}$ as $\\lambda_{g}$ , the distance between the center of the horizontal and the vertical slots is $\\frac{\\lambda_g}{4}$ .", "bbox": [ 513, 431, 913, 604 ], "page_idx": 3 }, { "type": "text", "text": "In the fabrication process of CPLWA, we adopt a two-layer copper-clad substrate structure, as shown in Fig. 7(b). The substrate material is F4BM-2, whose permittivity $\\epsilon = 3.02$ . The top and bottom layers of the substrate consist of copper and have undergone tin immersion plating to prevent oxidation. The bottom layer of copper functions as the ground, and the shorting vias are incorporated to penetrate the substrate, connecting the top and bottom layers in order to ground the top layer. These shorting vias are periodically arranged on the upper and lower boundaries of the substrate and the patch.", "bbox": [ 513, 603, 913, 741 ], "page_idx": 3 }, { "type": "text", "text": "The final structure of our proposed CPLWA is depicted in Fig. 7(c), where multiple units are linearly arranged together to enhance the directivity of the FSDM signal, which is similar to the antenna array. 
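As a rough order-of-magnitude check of the slot placement described above, the quarter-guided-wavelength offset can be estimated with a simple TEM-style approximation ($\\lambda_g \\approx \\lambda_0 / \\sqrt{\\epsilon}$; a real microstrip design would use the effective permittivity, so these numbers are only indicative):

```python
import math

C = 3e8        # speed of light (m/s)
F = 5.25e9     # center frequency (Hz)
EPS_R = 3.02   # permittivity quoted for the F4BM-2 substrate

lambda_0 = C / F                        # free-space wavelength
lambda_g = lambda_0 / math.sqrt(EPS_R)  # crude guided-wavelength estimate
print(f'lambda_0   = {lambda_0 * 1e3:.1f} mm')      # ~57.1 mm
print(f'lambda_g   = {lambda_g * 1e3:.1f} mm')      # ~32.9 mm
print(f'lambda_g/4 = {lambda_g / 4 * 1e3:.1f} mm')  # ~8.2 mm slot offset
```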
Note that the CPLWA is depicted with 6 units for illustration, but 11 units are arranged in practice. This CPLWA features two ports on both ends: one is the feed port that connects to an LP antenna for absorbing the WiFi signal, and the other should connect to a", "bbox": [ 511, 741, 915, 839 ], "page_idx": 3 }, { "type": "header", "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", "bbox": [ 83, 71, 401, 85 ], "page_idx": 3 }, { "type": "header", "text": "Yimiao Sun, et al.", "bbox": [ 803, 71, 913, 85 ], "page_idx": 3 }, { "type": "page_footnote", "text": "3It is worth noting that the 2D radiation pattern is used here for illustration purposes. In reality, the radiation pattern of the leaky wave with a specific frequency is more like a cone, with a generatrix along the propagation direction of the traveling wave.", "bbox": [ 513, 849, 913, 881 ], "page_idx": 3 }, { "type": "page_footnote", "text": "4Unless otherwise specified, CP signals stand for both RHCP and LHCP signals.", "bbox": [ 514, 881, 887, 893 ], "page_idx": 3 }, { "type": "page_number", "text": "379", "bbox": [ 485, 922, 514, 934 ], "page_idx": 3 }, { "type": "image", "img_path": "images/7d0df6daa0e60f943e48d51af1e118545c15659453e80866a132fc5dabc9f7dc.jpg", "image_caption": [ "(a) Unit of the LWA." ], "image_footnote": [], "bbox": [ 122, 103, 274, 205 ], "page_idx": 4 }, { "type": "image", "img_path": "images/a4811d8b72e0b779962645388f118c8dadfe40b3817213814d85446162eba8f8.jpg", "image_caption": [ "(b) Layered structure." ], "image_footnote": [], "bbox": [ 285, 108, 441, 199 ], "page_idx": 4 }, { "type": "image", "img_path": "images/77325214c9a5a697d9c27756c1cfc8c1669913385909b07abdad9bc4bfa1f232.jpg", "image_caption": [ "(c) Complete design.", "Figure 7: General view of CPLWA used in Bifrost." ], "image_footnote": [], "bbox": [ 88, 227, 478, 291 ], "page_idx": 4 }, { "type": "text", "text": "matched $50\\Omega$ load. By changing the signal feed port, polarization of the FSDM signal can switch between LHCP and RHCP. If the input signal has gone through all slots and reached the other end, yet still has energy remaining, the matched load will absorb the excess signal.", "bbox": [ 81, 352, 480, 421 ], "page_idx": 4 }, { "type": "text", "text": "The CPLWA used in Bifrost is specially designed for the 5.17GHz-5.33GHz WiFi band, while this structure and design methodology are universally applicable for other frequencies and bandwidths by properly modifying the relevant parameters.", "bbox": [ 81, 422, 480, 477 ], "page_idx": 4 }, { "type": "text", "text": "Now we conduct a quick validation to show the key performance of the proposed CPLWA using ANSYS HFSS. Firstly, the direction of the FSDM signal w.r.t. different frequencies is depicted in Fig. 8(a). There is a total $22^{\\circ}$ field of view (FoV) across the operating frequency band (5.17GHz-5.33GHz). Note that when the LP signal is fed into the right port or left port, the RHCP or LHCP signal will be radiated from $22^{\\circ}$ to $44^{\\circ}$ or $136^{\\circ}$ to $158^{\\circ}$ , respectively. Fig. 8(b) shows the energy distribution of signals at five different frequencies. 
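In practice the target needs the inverse of this frequency-angle mapping. A minimal sketch, assuming a monotonic calibration curve anchored at the simulated 22-44 degree span over 5.17-5.33 GHz (the intermediate points below are made up; a real deployment would interpolate measured or HFSS data):

```python
import numpy as np

# Hypothetical calibration table for the RHCP port.
freq_ghz = np.array([5.17, 5.21, 5.25, 5.29, 5.33])
angle_deg = np.array([22.0, 27.5, 33.0, 38.5, 44.0])

def aoa_from_frequency(f_ghz):
    # Linear interpolation between calibration points.
    return float(np.interp(f_ghz, freq_ghz, angle_deg))

print(aoa_from_frequency(5.23))  # ~30.2 degrees
```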
It is evident that the energy of the leaky signal concentrates on the correct direction, and the realized gains are all above 11.5dB. Therefore, the direction can be easily identified by examining the energy distribution of signals.", "bbox": [ 81, 478, 482, 643 ], "page_idx": 4 }, { "type": "text", "text": "With the proposed CPLWA, we will proceed with elaborating on the core localization algorithm in Bifrost.", "bbox": [ 81, 643, 482, 671 ], "page_idx": 4 }, { "type": "text", "text": "3.2 Basic Localization Model", "text_level": 1, "bbox": [ 83, 684, 326, 698 ], "page_idx": 4 }, { "type": "text", "text": "Let $S_{l}$ and $S_{r}$ respectively denote LHCP and RHCP signals that propagate from corresponding LWAs to the target via the LoS paths. The frequencies of these two signals, $f_{l}$ and $f_{r}$ , are what we desire for calculating the location. Recall that $S_{l}$ and $S_{r}$ are featured in frequency and space division multiplexing (FSDM) and orthogonal $\\mathrm{CP}^5$ , so these two signals won't interfere with each other. As a result, the target can estimate its relative direction to both LWAs based on the received spectrum and the radiation pattern of the two LWAs. Further, given locations of two LWAs, $L_{r} = (x_{r}, y_{r}, z_{r})$ of the RHCP LWA and $L_{l} = (x_{l}, y_{l}, z_{l})$ of the LHCP LWA, the target can output its absolute location. In detail, as we mentioned in §2, the radiation pattern of the LWA is a conical", "bbox": [ 81, 702, 482, 869 ], "page_idx": 4 }, { "type": "image", "img_path": "images/b24fbb91f95a7b3b3fd9b8a04a914b391287c260958fa8809ac3f9870b57a917.jpg", "image_caption": [ "(a) Main beam direction.", "Figure 8: Key results of the CPLWA." ], "image_footnote": [], "bbox": [ 517, 108, 720, 247 ], "page_idx": 4 }, { "type": "image", "img_path": "images/6fb82dec0dccfa3d8c1ecb2090b1d4daa1b5f62c3ee02be85.jpg", "image_caption": [ "(b) Realized gain." ], "image_footnote": [], "bbox": [ 730, 108, 913, 247 ], "page_idx": 4 }, { "type": "text", "text": "surface at a specific frequency. Therefore, the location $L_{t} = (x_{t}, y_{t})$ of the target device is the intersection point of the two conical surfaces and the horizontal plane of its height. By combining these conditions, $L_{t}$ can be estimated by solving the following equation set:", "bbox": [ 513, 309, 911, 376 ], "page_idx": 4 }, { "type": "equation", "text": "\n$$\nL _ {t} = \\left(x _ {t}, y _ {t}\\right): \\left\\{ \\begin{array}{l} F \\left(L _ {r}, f _ {r}\\right) = 0, \\\\ F \\left(L _ {l}, f _ {l}\\right) = 0, \\\\ z = z _ {t}, \\end{array} \\right. \\tag {3}\n$$\n", "text_format": "latex", "bbox": [ 633, 377, 908, 412 ], "page_idx": 4 }, { "type": "text", "text": "where $z_{t}$ is the target's height; functions $F(L_{r}, f_{r})$ and $F(L_{l}, f_{l})$ are mathematical equations of conical surfaces with the location of LWAs as the vertex. These two equations indicate the propagation directions of RHCP and LHCP signals at frequencies $f_{r}$ and $f_{l}$ , respectively. 
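A minimal numerical sketch of solving this equation set, assuming the RHCP LWA's cone axis lies along x and the LHCP LWA's along y (consistent with mounting the two LWAs on orthogonal walls); positions, angles, and the solver seed are hypothetical, and the cone follows the form given in Eq. (4) below:

```python
import numpy as np
from scipy.optimize import fsolve

def make_cone(vertex, theta, axis):
    # Conical surface with half-angle theta, in the form of Eq. (4).
    x0, y0, z0 = vertex
    a2 = (1.0 / np.tan(theta)) ** 2  # a^2 with a = cot(theta)
    def F(p, z):
        x, y = p
        if axis == 'x':
            return (x - x0) ** 2 - ((y - y0) ** 2 + (z - z0) ** 2) / a2
        return (y - y0) ** 2 - ((x - x0) ** 2 + (z - z0) ** 2) / a2
    return F

# Hypothetical deployment: LWA positions, decoded AoAs, known target height.
L_r, L_l, z_t = (0.0, 2.0, 1.5), (3.0, 0.0, 1.5), 1.0
F_r = make_cone(L_r, np.radians(33.0), axis='x')
F_l = make_cone(L_l, np.radians(40.0), axis='y')

x_t, y_t = fsolve(lambda p: [F_r(p, z_t), F_l(p, z_t)], x0=[1.0, 1.0])
print(f'estimated target location: ({x_t:.2f} m, {y_t:.2f} m)')
```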
Taking the RHCP signal as an example, $F(L_{r}, f_{r})$ can be formulated as", "bbox": [ 513, 425, 913, 508 ], "page_idx": 4 }, { "type": "equation", "text": "\n$$\nF = (x - x _ {r}) ^ {2} - \\frac {(y - y _ {r}) ^ {2}}{a ^ {2}} - \\frac {(z - z _ {r}) ^ {2}}{a ^ {2}}, \\tag {4}\n$$\n", "text_format": "latex", "bbox": [ 593, 513, 916, 542 ], "page_idx": 4 }, { "type": "text", "text": "where $a = \\cot [\\theta (f_r)]$ .", "bbox": [ 513, 544, 650, 558 ], "page_idx": 4 }, { "type": "text", "text": "However, there are two other types of signals impacting the localization accuracy when Bifrost functions: 1) LP WiFi signal that is emitted by the WiFi AP, and then received by the target. This signal establishes data communication between the target and the AP and propagates in both the LoS path and multipath. It is also the input signal of LWAs, which will be transformed into FSDM signals by the LWAs. 2) CP multipath signal that propagates from LWAs to the target after reflection, resulting in undesired noisy signals at the target.", "bbox": [ 511, 559, 913, 683 ], "page_idx": 4 }, { "type": "text", "text": "Thus, we should first identify the frequency of the FSDM signal from the LP WiFi signal (discussed in §3.3) and then filter out the CP multipath signal as much as possible (discussed in §3.4 and §3.5), to accurately estimate frequencies, $f_{l}$ and $f_{r}$ , and the target's location.", "bbox": [ 513, 684, 915, 739 ], "page_idx": 4 }, { "type": "text", "text": "3.3 Identifying Frequencies of CP Signals", "text_level": 1, "bbox": [ 514, 750, 861, 767 ], "page_idx": 4 }, { "type": "text", "text": "When Bifrost functions, LWAs need the LP WiFi signal as input, and the target device may also need it for data communication with the WiFi AP. Nevertheless, the LP signal may interfere with the reception of the CP signal, because CP antennas at the target device can receive the LP signal (as already explained in §2). To cancel this interference, we control LWAs to be periodically turned on and off, working in a duty-cycled manner. This design allows the target to identify frequencies that correspond to the CP signal by analyzing the variation in its received spectrum, and at the same", "bbox": [ 511, 768, 913, 893 ], "page_idx": 4 }, { "type": "header", "text": "Bifrost", "bbox": [ 84, 71, 137, 85 ], "page_idx": 4 }, { "type": "header", "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", "bbox": [ 594, 71, 911, 85 ], "page_idx": 4 }, { "type": "page_footnote", "text": "5 Unless stated otherwise, CP signals have the property of FSDM.", "bbox": [ 83, 881, 388, 893 ], "page_idx": 4 }, { "type": "page_number", "text": "380", "bbox": [ 485, 922, 514, 934 ], "page_idx": 4 }, { "type": "image", "img_path": "images/279daa2a6c56e4e94198173bb12843489cab9266b1f6d6bb7f2e5b98f6a29900.jpg", "image_caption": [ "(a) Normalized amplitude variation." ], "image_footnote": [], "bbox": [ 86, 98, 480, 188 ], "page_idx": 5 }, { "type": "image", "img_path": "images/c4cebafb43021fd59b73e2a54abe86e3e358b8ba47278543a6279387ef8f17fb.jpg", "image_caption": [ "(b) Normalized phase variation.", "Figure 9: Standardized CSI variation." 
], "image_footnote": [], "bbox": [ 86, 220, 480, 297 ], "page_idx": 5 }, { "type": "text", "text": "time, saves energy of LWAs. Specifically, we exploit WiFi CSI [27, 28, 75, 78] to explore fine-grained information on the amplitude and phase of the subcarriers. Fig. 9 illustrates the result of a proof-of-concept experiment, where subcarriers corresponding to LoS and multipath signals are distinguishable in the normalized amplitude of CSI. However, the variation in phase is not obvious, making it challenging to discern useful subcarriers because they are often obscured by random errors and noise. According to this result, we can only extract frequencies of the CP signal based on the amplitude variation in CSI.", "bbox": [ 81, 352, 483, 489 ], "page_idx": 5 }, { "type": "text", "text": "As an LWA turns on or off, we denote the corresponding CSI as $H_{on}(f_k)$ and $H_{off}(f_k)$ for the $k$ -th subcarrier with center frequency $f_k$ , respectively. The former is jointly influenced by CP and LP signals, while the latter is determined by the LP signal only, leading to the following relationship:", "bbox": [ 83, 489, 483, 560 ], "page_idx": 5 }, { "type": "equation", "text": "\n$$\n\\begin{array}{l} \\left\\| H _ {o n} \\left(f _ {k}\\right) \\right\\| = \\left\\| H ^ {C P} \\left(f _ {k}\\right) + H ^ {L P} \\left(f _ {k}\\right) \\right\\|, \\\\ \\left\\| H _ {o f f} \\left(f _ {k}\\right) \\right\\| = \\left\\| H ^ {L P} \\left(f _ {k}\\right) \\right\\|, \\tag {5} \\end{array}\n$$\n", "text_format": "latex", "bbox": [ 169, 566, 480, 608 ], "page_idx": 5 }, { "type": "text", "text": "where $\\| H^{CP}(f_k)\\|$ is the amplitude of subcarriers corresponding to the CP signal, and $\\| H^{LP}(f_k)\\|$ is that of the LP signal. Based on these two values, we can quantify the variation of CSI caused by the CP signal:", "bbox": [ 81, 614, 482, 670 ], "page_idx": 5 }, { "type": "equation", "text": "\n$$\n\\begin{array}{l} \\| \\Delta H (f _ {k}) \\| = \\| H ^ {C P} (f _ {k}) \\| \\\\ = \\| H ^ {C P} (f _ {k}) + H ^ {L P} (f _ {k}) \\| - \\| H ^ {L P} (f _ {k}) \\| \\tag {6} \\\\ = \\| H _ {o n} (f _ {k}) \\| - \\| H _ {o f f} (f _ {k}) \\| \\end{array}\n$$\n", "text_format": "latex", "bbox": [ 135, 670, 482, 734 ], "page_idx": 5 }, { "type": "text", "text": "In order to accurately analyze this variation and mitigate the effect of occasional outliers and noise, a Z-Score normalization procedure is performed on $\\Delta H(f_{k})$ . We execute a preliminary screening to quickly filter out the subcarriers that are less likely to correspond to the frequencies of the CP signals. A percentage threshold $\\varepsilon \\in (0, 1]$ is set to select subcarriers with a larger value of $\\Delta H(f_{k})$ , indicating that these subcarriers undergo significant changes and are more likely to be affected by the CP signal. The value of $\\varepsilon$ is chosen empirically based on the degree of multipath. Fig. 10(a) shows a high-level overview of the selected subcarriers, where LHCP and RHCP signals are highlighted in red and blue,", "bbox": [ 81, 741, 483, 893 ], "page_idx": 5 }, { "type": "image", "img_path": "images/bc61a0518220150b607c1d66cc7dc6947f380c2b1009e36ae95bd6ebef2eccde.jpg", "image_caption": [ "(a) Selecting frequencies of CP signals." 
], "image_footnote": [], "bbox": [ 517, 104, 916, 253 ], "page_idx": 5 }, { "type": "image", "img_path": "images/b8947759e9bead7e2b64a1301febbf053eed3a0fa8aef46ad73d69ef35fc88f1.jpg", "image_caption": [ "(b) Filtering out multipath signals." ], "image_footnote": [], "bbox": [ 517, 292, 916, 391 ], "page_idx": 5 }, { "type": "image", "img_path": "images/c0ca63a466b48a65700cdca67ed5ce4af8d69cd93ba98cefd9921e9d28cfe97c.jpg", "image_caption": [ "(c) Align subcarriers.", "Figure 10: Workflow of selecting correct frequencies (LHCP and RHCP are distinguished by red and blue colors)." ], "image_footnote": [], "bbox": [ 563, 435, 669, 527 ], "page_idx": 5 }, { "type": "image", "img_path": "images/75fa5e29537f55972cfe2ab54947c4583cfc8202904c13b1ea132fa3d329ee66.jpg", "image_caption": [ "(d) Estimate frequencies." ], "image_footnote": [], "bbox": [ 723, 434, 867, 520 ], "page_idx": 5 }, { "type": "text", "text": "respectively. In subsequent stages, we exclusively focus on these selected subcarriers.", "bbox": [ 513, 612, 911, 640 ], "page_idx": 5 }, { "type": "text", "text": "3.4 Filtering out the Multipath Signal", "text_level": 1, "bbox": [ 514, 652, 830, 669 ], "page_idx": 5 }, { "type": "text", "text": "As shown in Fig. 9(a), even though we have identified the frequencies of the CP signal from the WiFi signal, there still exists the multipath signal, resulting in undesired variation in $\\Delta H$ . Note that the multipath signal is mainly introduced by reflection of the CP FSDM signal. We find that subcarriers corresponding to the multipath signal can be divided into two categories: 1) Sparsely clustered subcarriers $C_s$ : FSDM signals with different frequencies and propagation directions may go through reflection at many places, but only a few of those signals reach the target with inconsecutive frequencies, resulting in many sparse clusters of subcarriers6. 2) Compactly clustered subcarriers $C_c$ : There are some FSDM signals with frequencies close to that of the LoS signal. Those FSDM signals are reflected right near the target device, which will result in a", "bbox": [ 511, 670, 916, 851 ], "page_idx": 5 }, { "type": "header", "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", "bbox": [ 83, 71, 401, 85 ], "page_idx": 5 }, { "type": "header", "text": "Yimiao Sun, et al.", "bbox": [ 803, 71, 913, 85 ], "page_idx": 5 }, { "type": "page_footnote", "text": "6 The polarization of the signals may flip after reflection, and we deal with it as the multipath signal in the frequency domain. Thus, this flip doesn't affect the function of our algorithm.", "bbox": [ 513, 859, 915, 893 ], "page_idx": 5 }, { "type": "page_number", "text": "381", "bbox": [ 486, 922, 511, 934 ], "page_idx": 5 }, { "type": "text", "text": "compact and wide cluster of subcarriers influenced by multipath and LoS signals.", "bbox": [ 81, 106, 480, 133 ], "page_idx": 6 }, { "type": "text", "text": "Here we first try to filter out $C_s$ . To do so, all the varied subcarriers are clustered, as Fig. 10(b) illustrates. 
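Before scoring the clusters, the selection step of §3.3 and this grouping step can be sketched in Python as follows (synthetic CSI and a hypothetical threshold; the paper's exact pipeline may differ):

```python
import numpy as np

def select_cp_subcarriers(H_on, H_off, eps=0.01):
    # Eq. (6): amplitude change caused by the CP signal, then Z-score it
    # and keep the top eps fraction of subcarriers.
    delta = np.abs(H_on) - np.abs(H_off)
    z = (delta - delta.mean()) / delta.std()
    k = max(1, int(eps * len(z)))
    return np.sort(np.argsort(z)[-k:]), delta

def cluster_adjacent(idx, gap=1):
    # Group selected subcarrier indexes into runs of adjacent bins.
    clusters, cur = [], [int(idx[0])]
    for i in idx[1:]:
        if i - cur[-1] <= gap:
            cur.append(int(i))
        else:
            clusters.append(cur)
            cur = [int(i)]
    clusters.append(cur)
    return clusters

# Synthetic CSI: 2025 subcarriers, LoS FSDM energy around bin 700.
rng = np.random.default_rng(0)
H_off = rng.normal(1.0, 0.05, 2025) * np.exp(1j * rng.uniform(0, 2 * np.pi, 2025))
H_on = H_off * (1 + rng.normal(0, 0.01, 2025))
H_on[695:706] *= 1.8
sel, delta = select_cp_subcarriers(H_on, H_off)
print(cluster_adjacent(sel))  # one wide LoS cluster plus sparse stragglers
```

The sparse stragglers in the printed output play the role of $C_s$ , which the integral score introduced next is meant to discard.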
Then, the following integral function will be calculated for every cluster to find the one most likely to correspond to the LoS signal,", "bbox": [ 81, 133, 480, 190 ], "page_idx": 6 }, { "type": "equation", "text": "\n$$\nC ^ {i} = \\int _ {f _ {k _ {\\min}} ^ {i}} ^ {f _ {k _ {\\max}} ^ {i}} \\| \\Delta H (f _ {k} ^ {i}) \\| \\, d f _ {k} \\tag {7}\n$$\n", "text_format": "latex", "bbox": [ 199, 191, 478, 233 ], "page_idx": 6 }, { "type": "text", "text": "where $f_{k_{\\min}}^{i}$ and $f_{k_{\\max}}^{i}$ are the minimum and maximum frequencies of the $i$ -th cluster, respectively. The value of $C^i$ can be regarded as the area formed by the curve of $\\| \\Delta H(f_k^i)\\|$ and the two frequencies $f_{k_{\\min}}^{i},f_{k_{\\max}}^{i}$ . The wider the bandwidth and higher the amplitude of a cluster are, the greater the value of its $C^i$ is.", "bbox": [ 81, 233, 480, 313 ], "page_idx": 6 }, { "type": "text", "text": "After that, we only retain the cluster that bears the highest $C^i$ , which is most likely to be $C_c$ and contains subcarriers corresponding to the LoS signal. However, as we mentioned before, some subcarriers in $C_c$ also correspond to the undesired multipath signal. Next, we are going to purify $C_c$ by narrowing down its frequency range as much as possible.", "bbox": [ 81, 314, 483, 398 ], "page_idx": 6 }, { "type": "text", "text": "3.5 Purifying the LoS Signal for Localization", "text_level": 1, "bbox": [ 83, 407, 478, 433 ], "page_idx": 6 }, { "type": "text", "text": "Denote the frequency range of $C_c$ as $[k_{\\min}^{r}, k_{\\max}^{r}]$ for RHCP signals and $[k_{\\min}^{l}, k_{\\max}^{l}]$ for LHCP signals. In both ranges, we are going to find the subcarrier with the largest $\\| \\Delta H(f_k) \\|$ , as Fig. 10(c) illustrates. After obtaining them, we denote the indexes of the selected subcarriers as $K^r$ and $K^l$ . Next, as Fig. 10(c) depicts, we align $K^r$ and $K^l$ , then trim the head and tail to retain the intersection of two clusters, $\\| \\Delta H^r (f_k) \\|$ and $\\| \\Delta H^l (f_k) \\|$ . Finally, we multiply $\\| \\Delta H^r (f_k) \\|$ and $\\| \\Delta H^l (f_k) \\|$ to form a weight matrix $G$ , which is illustrated in Fig. 10(d).", "bbox": [ 81, 445, 482, 563 ], "page_idx": 6 }, { "type": "equation", "text": "\n$$\nG = \\begin{bmatrix} \\left\\| \\Delta H ^ {r} \\left(f _ {K ^ {r} - \\delta}\\right) \\right\\| \\\\ \\vdots \\\\ \\left\\| \\Delta H ^ {r} \\left(f _ {K ^ {r} + \\delta}\\right) \\right\\| \\end{bmatrix} \\times \\begin{bmatrix} \\left\\| \\Delta H ^ {l} \\left(f _ {K ^ {l} - \\delta}\\right) \\right\\| & \\cdots & \\left\\| \\Delta H ^ {l} \\left(f _ {K ^ {l} + \\delta}\\right) \\right\\| \\end{bmatrix}, \\tag {8}\n$$\n", "text_format": "latex", "bbox": [ 94, 566, 480, 638 ], "page_idx": 6 }, { "type": "text", "text": "Then, the frequencies $f_{r}$ and $f_{l}$ are estimated by taking the weighted average", "bbox": [ 81, 646, 482, 664 ], "page_idx": 6 }, { "type": "text", "text": "of values in $[f_{K^{r}-\\delta}, f_{K^{r}+\\delta}]$ and $[f_{K^{l}-\\delta}, f_{K^{l}+\\delta}]$ , which are weighted by the corresponding values in the matrix $G$ . The purpose of this step is still to mitigate the interference of the multipath signal. 
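Continuing the sketch above, the cluster scoring of Eq. (7) (as a discrete sum) and the final weighted-average frequency estimate can look like this; the one-sided simplification and the synthetic spectrum are ours, and the full method weights by the outer-product matrix $G$ from both polarizations:

```python
import numpy as np

def cluster_energy(delta, cluster):
    # Discrete stand-in for Eq. (7): area under ||dH|| over one cluster.
    return float(np.sum(delta[cluster]))

def estimate_frequency(delta, K, freqs, half_width=3):
    # Weighted average around the peak subcarrier K, in the spirit of Eq. (8).
    lo, hi = K - half_width, K + half_width + 1
    w = delta[lo:hi]
    return float(np.sum(w * freqs[lo:hi]) / np.sum(w))

# Synthetic spectrum: 2025 subcarriers assumed to span 5.17-5.33 GHz.
freqs = np.linspace(5.17e9, 5.33e9, 2025)
delta_r = np.exp(-0.5 * ((np.arange(2025) - 700) / 4.0) ** 2)
clusters = [list(range(690, 712)), [1500, 1501]]  # from the clustering step
best = max(clusters, key=lambda c: cluster_energy(delta_r, c))
K_r = best[int(np.argmax(delta_r[best]))]
f_r = estimate_frequency(delta_r, K_r, freqs)
print(f'estimated f_r = {f_r / 1e9:.4f} GHz')  # ~5.2253 GHz
```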
After that, the estimated values of the two frequencies will be fed into Eq. (4) to output an estimation of the target's location. Note that if there are multiple WiFi links for selection, one can choose the link that results in the smallest size of $\\| \\Delta H(f_k)\\|$ , meaning that the range of LoS signals' frequency is reduced to the minimum.", + "bbox": [ + 81, + 667, + 480, + 784 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Note that the basis of our localization algorithm is using the different CP signals to distinguish different LWAs, and the CP signals can't be replaced by the LP signals. The reason is that the LP signals may lead to high localization errors or even the breakdown of the localization system. Specifically, once the orientation of LP devices changes, polarization directions of these devices change accordingly. As such, each receiving antenna is very likely to receive FSDM signals from both LWAs and can't distinguish them.", + "bbox": [ + 81, + 784, + 482, + 893 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e14dd07adfafe5f6159b6067571cb4b5d052bc770da68484928b81b7d7546a66.jpg", + "image_caption": [ + "Figure 11: Hardware Settings." + ], + "image_footnote": [], + "bbox": [ + 519, + 101, + 911, + 229 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For example, a receiving antenna with $0^{\\circ}$ polarization can receive both $0^{\\circ}$ and $90^{\\circ}$ polarized FSDM signals after rotating $45^{\\circ}$ . In this case, the target can't distinguish FSDM signals from the two LWAs, and then the localization system can't work. Note that this problem can't be avoided since the target antenna's orientation isn't known in advance. In contrast, CP signals are free from this problem. The RHCP signal can't be received by LHCP antennas no matter which orientation the target antenna has.", + "bbox": [ + 511, + 253, + 913, + 369 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Next, we will proceed with describing the prototype implementation to gain insights on the performance of Bifrost in varied settings.", + "bbox": [ + 513, + 369, + 915, + 412 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 EVALUATION", + "text_level": 1, + "bbox": [ + 514, + 414, + 671, + 429 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We evaluate the performance of Bifrost using two low-cost PCB-based LWAs working at 5.17GHz-5.33GHz and a WiFi sensing plat", + "bbox": [ + 511, + 444, + 915, + 465 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "form called PicoScenes [38] to extract CSI. When Bifrost functions, the WiFi transceiver communicates at the same band based on 802.11ax standard [1]. We first describe our implementation and evaluation settings in §4.1. 
Then, investigation on Bifrost's performance is four-pronged: §4.2 compares Bifrost with SpotFi [43], the state-of-the-art indoor WiFi localization technique, in a real-world indoor setting and NLoS scenarios, and then shows how the localization accuracy can be improved when Bifrost aids SpotFi to function in AP-constrained scenarios; Subsequently, in §4.3, we conduct an ablation study to evaluate the contribution of each sub-module of localization algorithm; Then, in §4.4, we dissect the impacting factors on localization accuracy, including multipath, transmission power, as well as the distance between LWAs and the AP; Also, we evaluate the influence of deploying Bifrost on data communication of WiFi transceivers in §4.5; Finally we summarize the evaluation in §4.6.", + "bbox": [ + 511, + 470, + 913, + 691 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 Implementation and Experimental Methodology", + "text_level": 1, + "bbox": [ + 513, + 703, + 841, + 737 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Hardware and Software. Our proposed LWA is shown in Fig. 11(b). The main body of our LWA is $24.2\\mathrm{cm} \\times 5.2\\mathrm{cm}$ , containing 11 single units designed to ensure most input signals' energy can be leaked out. One of the LWA's feed ports is connected to a LP antenna for receiving the WiFi signal while the other port is connected to a $50\\Omega$ matched load to absorb the remaining energy of the signal that goes through the entire LWA structure. By switching the feed port, the polarization of the FSDM signal can be altered between LHCP and RHCP. Besides, a low-noise amplifier powered by a small rechargeable battery is utilized to boost the input signal with 0.43W power consumption. A NE555 timer IC with a load switch circuit", + "bbox": [ + 511, + 742, + 915, + 893 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Bifrost", + "bbox": [ + 84, + 71, + 137, + 85 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", + "bbox": [ + 594, + 71, + 911, + 85 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "382", + "bbox": [ + 486, + 922, + 514, + 934 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ac1f54c4acd4c26f1fbb5b2b4901becd11f79892dd7ce7e313931dd158e5060a.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 106, + 103, + 460, + 229 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/eefd18541489eb0898ad31fa879c5762152ff2479e5c18822d2667f50c066d6b.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 106, + 252, + 460, + 382 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/79105e03737063bf0d3574cf9f3347ffdb081d0f7005a9b3f158116807912a80.jpg", + "image_caption": [ + "(c)", + "Figure 12: Experimental scenarios and deployment: (a) The hall scenario; (b) The classroom scenario; (c) The APs' deployment in the corridor and the classroom; (d) The APs' deployment in the hall and the meeting room." 
+ ], + "image_footnote": [], + "bbox": [ + 106, + 404, + 279, + 577 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f1b54cdfe48e3f891784027e9e717ec71afd8653ef63103aea8b00ad7e668023.jpg", + "image_caption": [ + "(d)" + ], + "image_footnote": [], + "bbox": [ + 316, + 404, + 464, + 571 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "is employed to control the on-off state of the amplifier and, in turn, the LWAs, resulting in a $20\\%$ duty cycle for energy saving. The cost of each proposed LWA is 7.41 USD, of which 4.36 USD is for the materials and 3.05 USD for the control circuit. To receive the CP FSDM signal, we equip the target with two $3.87\\mathrm{cm}\\times 3.87\\mathrm{cm}$ patch antennas, as Fig. 11(a) depicts. One antenna is LHCP, while the other is RHCP, and both are fixed on the antenna mount connected to the COMFAST AX210 WiFi card [17] on the host computer.", + "bbox": [ + 81, + 685, + 482, + 795 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We use PicoScenes, a WiFi sensing platform, to send WiFi packets from the AP at 20dBm and extract CSI at the target. In the working band of Bifrost, PicoScenes can procure CSI data of 2025 subcarriers with indexes $[-1012, 1012]$ . We run PicoScenes on Ubuntu 20.04,", + "bbox": [ + 81, + 796, + 482, + 852 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "then analyze the CSI data and execute the localization algorithm in MATLAB 2022b.", + "bbox": [ + 513, + 106, + 913, + 132 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Baseline. We compare Bifrost with SpotFi, the state-of-the-art indoor WiFi localization technique, under various settings. To ensure the validity of our results, we make our best effort to re-implement SpotFi and ensure a fair comparison. We evaluate the performance of SpotFi by deploying multiple WiFi APs strictly based on the real-world settings of WiFi APs, as Fig. 12 shows. Before each set of experiments, we use a laser rangefinder to obtain the ground truth, including the coordinates of the target device and the LWAs.", + "bbox": [ + 513, + 137, + 915, + 248 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Scenarios and Deployment. We select four typical indoor scenarios for evaluation, across different sizes and different levels of multipath effect: 1) A small-size hall (6.2m×4.5m) with little multipath; 2) A long and narrow corridor (7.5m×2.1m) with little multipath; 3) A small-size meeting room (5.7m×4.9m) with rich multipath; 4) A large-size classroom (10.6m×7.1m) with rich multipath. In each scenario, two LWAs are attached to two orthogonal walls. The target device is mounted onto tripods, keeping the height constant across all experiments.", + "bbox": [ + 511, + 251, + 916, + 377 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2 Overall Performance", + "text_level": 1, + "bbox": [ + 514, + 387, + 730, + 402 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we first evaluate the localization accuracy of Bifrost and SpotFi in real-world settings, where the WiFi APs in our experiments are deployed at the same positions as those in practice. Then we deploy Bifrost in the meeting room and classroom, where SpotFi doesn't work well, to enhance the performance of SpotFi, so as to see the accuracy improvement brought by Bifrost.", + "bbox": [ + 513, + 406, + 913, + 491 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Performance Comparison in Realistic Settings. 
In reality, most indoor WiFi APs are scattered at different locations and are very likely separated from each other by walls, so that LoS paths are usually obstructed. Thus, it is hard for the target device to establish more than one LoS connection with APs, according to our real-world investigation (i.e., Fig. 2). We evaluate the performance of SpotFi in these practical indoor settings, and also the localization error of Bifrost when deployed in the above-mentioned four scenarios. Fifty locations are chosen in each scenario for location estimation. The evaluation results are reported in Fig. 13 (the solid blue lines stand for Bifrost and the dashed red lines for SpotFi).", + "bbox": [ + 511, + 493, + 913, + 645 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In the hall, both Bifrost and SpotFi are supposed to exhibit their best performance due to the low-level multipath effect, but the median error of SpotFi is $1.23\\mathrm{m}$ , which is more than twice Bifrost's $0.61\\mathrm{m}$ . This is because only one decent LoS signal can be obtained at most locations due to the blockage of walls, even though three APs are deployed around. As the pie chart illustrates, SpotFi outperforms Bifrost at only 9 locations. When it comes to the corridor scenario, the median error of SpotFi increases to $1.77\\mathrm{m}$ because two of the three APs are situated inside rooms, so that the AoAs obtained by the target are heavily distorted. We note that the median error of Bifrost also increases, to $0.76\\mathrm{m}$ . This slight performance degradation is mainly due to the extension of the localization range, which is further investigated in §4.4.", + "bbox": [ + 511, + 646, + 915, + 824 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Next, we switch to the meeting room, where more pronounced multipath exists. What's worse, there is no AP in the meeting room, making it more challenging for both approaches to function. The accuracy of the two approaches unsurprisingly degrades: the median error is $1.95\\mathrm{m}$ for SpotFi and $0.91\\mathrm{m}$ for Bifrost. 
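For reference, the per-scenario medians quoted here and the pie charts of Fig. 13 are plain aggregates over the per-location trials; a toy sketch of that bookkeeping follows (the error arrays are fabricated placeholders, not measured data):

```python
import numpy as np

# Toy bookkeeping behind Fig. 13: per-scenario medians plus the pie-chart
# win counts. The two arrays stand in for the 50 measured locations.
rng = np.random.default_rng(0)
bifrost_err = rng.gamma(2.0, 0.35, size=50)   # placeholder errors (m)
spotfi_err = rng.gamma(2.0, 0.70, size=50)    # placeholder errors (m)

print(f"median Bifrost: {np.median(bifrost_err):.2f} m")
print(f"median SpotFi:  {np.median(spotfi_err):.2f} m")
# Fig. 13's pie charts report at how many locations each method wins.
print(f"Bifrost wins at {int((bifrost_err < spotfi_err).sum())}/50 locations")
```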
Similarly, the", + "bbox": [ + 513, + 825, + 915, + 893 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", + "bbox": [ + 83, + 71, + 403, + 85 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Yimiao Sun, et al.", + "bbox": [ + 803, + 71, + 913, + 85 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "PicoScenes automatically interpolates the 0-th and the other 32 pilot subcarriers besides the 1992-tone RUs in this band.", + "bbox": [ + 81, + 869, + 482, + 892 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "383", + "bbox": [ + 486, + 922, + 514, + 934 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/20c13b4e9eca0eb9f70a5bb01dab7d7b75d52113724c5af0424c18db95b25e49.jpg", + "image_caption": [ + "(a) Hall" + ], + "image_footnote": [], + "bbox": [ + 84, + 103, + 285, + 251 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/75fa902b814efe455e4e687ee56b920319ee15e0c247276fed013941dd966a6b.jpg", + "image_caption": [ + "(b) Corridor" + ], + "image_footnote": [], + "bbox": [ + 295, + 103, + 493, + 251 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/13b58135f6e1b371566e5ed5acca1a3ad836d23cf639ccd43f0c1935dc9868e1.jpg", + "image_caption": [ + "(c) Meeting room" + ], + "image_footnote": [], + "bbox": [ + 503, + 103, + 702, + 251 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/921fa998df71dbda1001c882766862254b51e8730c8684b124dbf457256bc250.jpg", + "image_caption": [ + "(d) Classroom", + "Figure 13: Overall performance of Bifrost and SpotFi across different scenarios (the pie charts represent at how many locations each method shows a lower error)." + ], + "image_footnote": [], + "bbox": [ + 712, + 103, + 910, + 251 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/60f56b36363b5e62b664b4fda9fd7d15b212e2b11c0e085fa5105f820f27e554.jpg", + "image_caption": [ + "(a) The NLoS AP outdoors.", + "Figure 14: Deployment of the NLoS settings." + ], + "image_footnote": [], + "bbox": [ + 101, + 310, + 290, + 464 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/6bb200cf265600ee644a5c41e6e2c4fcc79f9f4fe3811f9f401168875b39ae3c.jpg", + "image_caption": [ + "(b) The NLoS AP indoors." + ], + "image_footnote": [], + "bbox": [ + 303, + 309, + 449, + 464 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "performance of SpotFi is restrained due to the lack of the LoS signal. Bifrost exhibits acceptable performance in this tough environment and avoids escalation of errors. This can be attributed to two aspects. On one hand, Bifrost can function once the input signal has enough energy, without the need for a LoS AP. On the other hand, Bifrost exploits a delicate algorithm to tame the multipath effect. We will further discuss the issues of multipath and NLoS in §4.4. In this scenario, SpotFi doesn't outperform Bifrost at any location.", + "bbox": [ + 81, + 531, + 482, + 642 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Finally, we deploy SpotFi and Bifrost in the large-size classroom with rich multipath. With a LoS AP, the median error of SpotFi is reduced to $1.87\\mathrm{m}$ , which is better than that in the meeting room with no LoS AP. 
By contrast, the median error of Bifrost increases to $1.20\\mathrm{m}$ , mainly due to a longer distance between the LWAs and the WiFi APs and richer multipath.", + "bbox": [ + 81, + 643, + 482, + 724 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Through all experiments in the four scenarios, the median error of Bifrost is $0.81\\mathrm{m}$ , which is $52.35\\%$ less than that of SpotFi (i.e., $1.70\\mathrm{m}$ ). Bifrost outperforms SpotFi at most locations, except those where the target can obtain 3 LoS signals from 3 APs. However, as shown in Fig. 13, the chance for SpotFi to achieve better performance is less than $7\\%$ .", + "bbox": [ + 81, + 724, + 482, + 808 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Performance Comparison in NLoS Scenarios. Then we conduct two groups of experiments to demonstrate Bifrost's ability to localize in NLoS scenarios and compare its performance with that of SpotFi.", + "bbox": [ + 81, + 811, + 482, + 866 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In the first group of experiments, we deploy the localized target and the LWAs in a hall. As Bifrost only uses one AP to function,", + "bbox": [ + 81, + 866, + 482, + 893 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "we evaluate the performance of Bifrost when this AP is inside and outside the hall (i.e., LoS and NLoS scenarios). The results in Fig. 15 show that the median errors of Bifrost are $0.61\\mathrm{m}$ in LoS and $0.73\\mathrm{m}$ in NLoS, respectively. Meanwhile, in the same hall, we also evaluate the performance of SpotFi in LoS and NLoS scenarios, respectively. In the LoS scenario, 3 APs are deployed in the hall and can establish LoS connections with the target. In the NLoS scenario, as Fig. 14(a) shows, one of the APs (i.e., AP1) is outside the room, while the other 2 APs (i.e., AP2 and AP3) can connect with the target along the LoS paths. We find that the median error of SpotFi increases from $0.45\\mathrm{m}$ in LoS to $1.15\\mathrm{m}$ in NLoS. The error may further go beyond $1.6\\mathrm{m}$ if only one AP is left in LoS, as reported in [43].", + "bbox": [ + 511, + 316, + 915, + 496 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In the second group of experiments, we compare the performance of Bifrost and SpotFi using a different NLoS setting. As Fig. 14(b) shows, we deploy the localized target, the LWAs and three APs in the same hall. One of the three WiFi APs (i.e., AP1) is deliberately deployed around the corner and surrounded by multiple chairs, so it can't establish LoS connections with the target or the LWAs, while the other 2 APs (i.e., AP2 and AP3) can connect with the target along the LoS paths. SpotFi uses all 3 APs to localize the target, and its median error is $1.21\\mathrm{m}$ . Bifrost only uses the AP in NLoS (i.e., AP1) to function, and its median error is $0.69\\mathrm{m}$ , which is $42.98\\%$ less than that of SpotFi.", + "bbox": [ + 511, + 497, + 915, + 648 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "These two groups of experiments demonstrate that Bifrost provides relatively stable performance whether the WiFi AP is in a LoS or NLoS scenario. In NLoS scenarios, Bifrost achieves much higher accuracy than SpotFi.", + "bbox": [ + 511, + 648, + 913, + 705 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Performance Enhancement when Bifrost Aids SpotFi. Next, we deploy Bifrost where SpotFi shows poor accuracy to see if Bifrost can aid SpotFi to improve localization accuracy. 
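One plausible reading of this aiding scheme, sketched as Python glue code (the room test and the locate() interfaces are hypothetical placeholders; the actual policy is spelled out in the next paragraph):

```python
# Hypothetical glue logic for the "Bifrost aids SpotFi" experiment: rooms
# with constrained APs use Bifrost, everywhere else falls back to SpotFi.
BIFROST_ROOMS = {"meeting_room", "classroom"}   # where LWAs are deployed

def localize(target, current_room, bifrost, spotfi):
    """Return a position estimate, preferring Bifrost where deployed."""
    if current_room in BIFROST_ROOMS:
        return bifrost.locate(target)   # assumed API, for illustration
    return spotfi.locate(target)        # assumed API, for illustration
```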
In practice, it is impossible to deploy Bifrost everywhere, so we choose the meeting room and the classroom, where localization accuracy is heavily affected by the constrained APs and is the worst. Specifically, when the target enters these two rooms, its location will be reported by Bifrost. Otherwise, the target keeps using SpotFi for indoor localization.", + "bbox": [ + 511, + 708, + 915, + 830 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As shown in Fig. 16, the median localization error is $1.13\\mathrm{m}$ when Bifrost aids SpotFi, achieving a $33.54\\%$ error reduction compared with SpotFi operating independently in all scenarios. This indicates", + "bbox": [ + 511, + 832, + 911, + 875 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Bifrost", + "bbox": [ + 84, + 71, + 137, + 85 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", + "bbox": [ + 594, + 71, + 911, + 85 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "384", + "bbox": [ + 486, + 922, + 514, + 934 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/352097b5d2f11c8cdaf6dd1ceacd68ed9b900106b469d063d16f21faf358a35f.jpg", + "image_caption": [ + "Figure 15: Performance of Bifrost and SpotFi in the NLoS scenario.", + "Figure 16: Performance enhancement brought by Bifrost.", + "Figure 17: Ablation study on the localization algorithm." + ], + "image_footnote": [], + "bbox": [ + 84, + 103, + 292, + 251 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/8440172285ba16eaeb2c520a87c7567b6450151cb2e9e25468a2c653a068c295.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 103, + 493, + 250 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/67c7cfcad83ea3c2c70ebae1a6e28f6ac8a004921890e9bd31e0f8baa85750ab.jpg", + "image_caption": [ + "Figure 18: Impact of the multipath effect." + ], + "image_footnote": [], + "bbox": [ + 496, + 102, + 689, + 250 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/46dc08a5fc2398f6e3fd535ac2a3b8f880e1ed26ef679898911b6677925990eb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 103, + 913, + 250 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "that Bifrost can not only work independently, but also enhance the localization accuracy of existing localization techniques.", + "bbox": [ + 81, + 311, + 480, + 339 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.3 Ablation Study", + "text_level": 1, + "bbox": [ + 81, + 358, + 250, + 375 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "There are three crucial sub-modules in Bifrost's localization algorithm, that is, identifying the frequencies of CP signals (module 1, presented in §3.3), filtering out the multipath signal (module 2, presented in §3.4), and purifying the LoS signal for localization (module 3, presented in §3.5). We conduct an ablation study to evaluate the contribution of each sub-module to localization accuracy. The evaluation is conducted under four settings: S1, without any sub-module; S2, only with module 1; S3, with modules 1 and 2; and S4, with all three modules.", + "bbox": [ + 81, + 377, + 483, + 500 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Fig. 17 reports the results of this ablation study. If we do nothing and directly extract frequencies from the raw amplitude data of CSI, the median localization error surges to $3.31\\mathrm{m}$ (S1). 
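To make module 1 concrete before comparing S2-S4, here is a rough stand-in for the LP-baseline removal (not the exact filter of §3.3, which is not reproduced here): detrend the per-subcarrier CSI amplitude with a moving median, then pick the strongest residual peak as the CP frequency.

```python
import numpy as np

def identify_cp_frequency(csi_amp, freqs_ghz, win=101):
    """Rough stand-in for module 1 (Section 3.3): suppress the slowly
    varying LP WiFi baseline across subcarriers with a moving median,
    then pick the subcarrier where the CP (FSDM) component stands out."""
    pad = win // 2
    padded = np.pad(csi_amp, pad, mode="edge")
    baseline = np.array([np.median(padded[i:i + win])
                         for i in range(len(csi_amp))])
    return freqs_ghz[int(np.argmax(csi_amp - baseline))]

# Placeholder CSI amplitude over the 2025 subcarriers noted in Section 4.1.
freqs = np.linspace(5.17, 5.33, 2025)
amp = 1.0 + 0.05 * np.sin(np.linspace(0.0, 6.0, 2025))  # fake LP baseline
amp[1200:1215] += 0.8                                   # fake CP peak
print(f"identified CP frequency: {identify_cp_frequency(amp, freqs):.3f} GHz")
```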
Instead, once the LP WiFi signal is filtered out, the frequencies of the CP signals can be highlighted, resulting in a median localization error of $1.51\\mathrm{m}$ (S2). Further, the results of S3 and S4 show that the median error is reduced to around $0.93\\mathrm{m}$ and $0.81\\mathrm{m}$ , respectively, once we further filter out the multipath signal and purify the LoS signal. These results show the necessity and contribution of each module in our design.", + "bbox": [ + 81, + 501, + 483, + 626 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.4 Impacting Factors", + "text_level": 1, + "bbox": [ + 81, + 645, + 274, + 662 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Next, we analyze the impact of three different factors on the performance of Bifrost, that is, multipath in the environment, the transmission power, as well as the distance between the LWAs and the WiFi AP.", + "bbox": [ + 81, + 664, + 483, + 718 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Multipath. We examine the AoA estimation accuracy of Bifrost in multipath scenarios. We fix the positions of the LWAs and the target, then change the number of indoor objects (i.e., chairs and desks) to create different degrees of multipath. Specifically, two desks are first set in the room to emulate a light multipath environment, and then ten chairs are further added to produce richer signal reflections. The results in Fig. 18 indicate that the AoA estimation accuracy degrades as the multipath is intensified, where the median angle error initially sits around $3.8^{\\circ}$ , and then increases to around $6.7^{\\circ}$ . The more multipath exists, the more sparsely scattered clusters of subcarriers $C_s$ are formed. Thus, when these clusters are stacked with each other to form a wider cluster, there is a certain chance for our", + "bbox": [ + 81, + 724, + 483, + 890 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "algorithm to misidentify the LoS signal, causing greater errors in AoA estimation.", + "bbox": [ + 513, + 311, + 913, + 338 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We also note that Bifrost maintains relatively stable performance across different polarizations. The difference between the median errors of the LHCP and RHCP signals is less than $0.3^{\\circ}$ , which underscores the robustness of our proposed LWA and localization algorithm.", + "bbox": [ + 513, + 339, + 915, + 407 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Transmission Power. The default transmission power of the AP is 20dBm in the above-mentioned evaluations, and we now vary this value to investigate its impact on localization performance. Moreover, as mentioned before, we can't always guarantee that the WiFi AP establishes a LoS path with the LWAs, so we also compare the AP in LoS and NLoS scenarios under each setting of transmission power. We place the AP 2m outside the door and the target 2m inside the door, switching between the LoS and NLoS scenarios by opening and closing the door. The results in Fig. 19 show that decreasing the transmission power leads to an increase in the localization error, regardless of whether the AP is in LoS or NLoS. Besides, the errors in the LoS scenario are always lower than those in NLoS for the same transmission power. 
These findings indicate the negative impact that NLoS propagation can have on localization performance.", + "bbox": [ + 511, + 412, + 915, + 619 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "However, we also observe that as the transmission power increases, the impact of NLoS on the performance of Bifrost decreases, albeit gradually. Notably, when the transmission power is set at $20\\mathrm{dBm}$ , the median errors are $0.61\\mathrm{m}$ and $0.73\\mathrm{m}$ in the LoS and NLoS scenarios, respectively. In practical scenarios, this performance is sufficient to meet the requirements of most location-based applications.", + "bbox": [ + 513, + 621, + 915, + 718 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Distance between AP and LWAs. The performance of Bifrost may be influenced by the energy of the input WiFi signal fed into the LWAs, because it determines the SNR (signal-to-noise ratio) of the FSDM signal. The energy of the input WiFi signal is mainly related to two factors, namely the transmission power and the distance between the AP and the LWAs. While the former factor has been discussed above, here we probe into the impact of distance. We carry out the experiments along the corridor and remove reflectors as much as possible, while the distance is set to $2.5\\mathrm{m}$ , $5\\mathrm{m}$ , $7.5\\mathrm{m}$ , and $10\\mathrm{m}$ . The results in Fig. 20 demonstrate that the localization error increases with distance and may even result in outliers. The median errors are $0.63\\mathrm{m}$ , $0.65\\mathrm{m}$ , and $0.93\\mathrm{m}$ in the first three groups of experiments,", + "bbox": [ + 511, + 723, + 915, + 890 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", + "bbox": [ + 83, + 71, + 403, + 85 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Yimiao Sun, et al.", + "bbox": [ + 803, + 71, + 913, + 85 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "385", + "bbox": [ + 486, + 922, + 514, + 934 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/16a38231d650bb86b5ff8ee17331187aa7c7d79c58ccc45b049d167c2786c7dd.jpg", + "image_caption": [ + "Figure 19: Impact of the transmission power.", + "Figure 20: Impact of the distance between AP and LWAs.", + "Figure 21: Impact on the AP and the target of Bifrost.", + "Figure 22: Impact on other WiFi connections." + ], + "image_footnote": [], + "bbox": [ + 89, + 103, + 299, + 251 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/2ce18e1841f8f48f59c2e0552990819ca20516f63df55589e295f5240d1087a7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 303, + 103, + 496, + 251 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/5f2a96e0c1af772f0330b928316bb4518b1496e40fd924527949102072aa6cf5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 103, + 697, + 251 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/41240c86f3dc7ef92bd433d9ea054df14aee43845c5886815c7a746fcfd18272.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 700, + 103, + 903, + 250 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "all of which are below $1\\mathrm{m}$ , yet the median error spikes to $1.49\\mathrm{m}$ in the $10\\mathrm{m}$ setting. 
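A back-of-envelope check of why distance hurts: free-space path loss alone adds about 12 dB between 2.5m and 10m, which directly lowers the SNR of the FSDM signal. The sketch below uses the standard FSPL formula; any absolute SNR figure would be deployment-specific.

```python
import math

def fspl_db(distance_m, freq_hz=5.25e9):
    """Standard free-space path loss in dB (mid-band frequency assumed)."""
    return 20 * math.log10(4 * math.pi * distance_m * freq_hz / 3e8)

for d in (2.5, 5.0, 7.5, 10.0):   # the distances evaluated in Fig. 20
    print(f"{d:4.1f} m: FSPL = {fspl_db(d):.1f} dB")
# Going from 2.5 m to 10 m costs 20*log10(4) ~= 12 dB before the WiFi
# signal even reaches the LWA, consistent with the error growth above.
```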
Despite this, the range of $7.5\\mathrm{m}$ is sufficient to cover most rooms in a typical building, thus ensuring the feasibility of Bifrost in practice.", + "bbox": [ + 81, + 311, + 483, + 367 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.5 Impact on Communication", + "text_level": 1, + "bbox": [ + 81, + 378, + 346, + 395 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In this section, we evaluate the impact of deploying Bifrost on WiFi connections, including the connection between the AP and the target as well as other connections. Firstly, we control the AP to transmit 1000 packets at a $50~ms$ interval, and the packet loss rate is recorded in each group of experiments. The results in Fig. 21 show that the median packet loss rates are $3.92\\%$ and $3.71\\%$ when the LWA is on and off, respectively. This $0.2\\%$ difference implies that the operation of Bifrost has a negligible influence on the AP-target communication.", + "bbox": [ + 81, + 397, + 482, + 521 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Secondly, we place Bifrost's transceiver at an intersection region covered by two commercial APs (AP1 in a classroom and AP2 in a laboratory) with good signal quality. We then use different off-the-shelf smartphones to establish WiFi connections with these APs and record the variation in throughput over 2 hours for each connection (C1: OnePlus 9-AP1, C2: iPhone 13-AP2, C3: OnePlus 9-AP1, and C4: iPhone-13-AP2). The results are shown in Fig. 22. We find that the median throughput degrades by $2.7\\%$ and $0.4\\%$ in C1 and C3, which has nearly no impact on the network quality or user experience. Interestingly, the throughput increases when the LWAs are turned on for C2 and C4. We attribute this increase to statistical error, mainly caused by changes in network quality and wireless channels.", + "bbox": [ + 81, + 522, + 483, + 702 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.6 Summary of Evaluation", + "text_level": 1, + "bbox": [ + 83, + 713, + 321, + 729 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Based on the above evaluations of Bifrost, the following summary can be drawn:", + "bbox": [ + 81, + 731, + 482, + 758 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) The median localization error of Bifrost is $0.81\\mathrm{m}$ , which is $52.35\\%$ less than that of SpotFi in arguably realistic indoor settings.", + "2) Bifrost can be deployed in scenarios without enough APs to help SpotFi enhance performance, reducing the overall localization error of SpotFi by $33.54\\%$ .", + "3) The distance between LWAs and APs, multipath, and transmission power influence Bifrost's performance in different ways, yet the absolute accuracy never degrades drastically." + ], + "bbox": [ + 86, + 762, + 483, + 895 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4) The deployment of Bifrost has a negligible impact on the communication quality of either the link between the AP and the target or other WiFi connections.", + "bbox": [ + 519, + 311, + 911, + 354 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5 DISCUSSION", + "text_level": 1, + "bbox": [ + 514, + 373, + 665, + 387 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In this section, we discuss practical issues concerning the applicability and efficacy of Bifrost.", + "bbox": [ + 513, + 392, + 915, + 420 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Complexity of Deployment. 
Deploying Bifrost is easy and straightforward, taking only two steps: stick the LWAs to the wall, and measure the LWAs' coordinates. Compared with most existing indoor localization methods, Bifrost works in a plug-and-play manner, requiring neither complex configurations nor additional operations on the APs and the target.", + "bbox": [ + 513, + 422, + 915, + 506 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "FoV and Coverage of LWAs. Bifrost achieves a $22^{\\circ}$ FoV in the current prototype by using 160MHz bandwidth (5.17GHz - 5.33GHz). The FoV and coverage can be expanded by using the entire WiFi band, including frequencies at 2.4GHz, 5.2GHz, and 5.8GHz [47]. This expansion is feasible because most existing WiFi devices already support dual- or tri-band functionality.", + "bbox": [ + 513, + 508, + 915, + 593 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Applicability. Considering that most of the current commercial WiFi devices are equipped with LP antennas, they may not be compatible with Bifrost yet. There are two potential solutions to enhance the applicability of Bifrost. On one hand, some commercial off-the-shelf CP antennas (e.g., the CP flat patch antennas [45] from L-com, Inc.) are developed to be integrated with existing WiFi APs. Bifrost can be deployed on such devices. On the other hand, in our future work, we will study how to utilize LP rather than CP signals to improve the applicability of Bifrost. To distinguish LWAs using the LP signals, different phase shifts or OOK patterns may be exploited.", + "bbox": [ + 511, + 595, + 915, + 748 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Besides, indoor obstacles may also influence the applicability of Bifrost. The reason is that the localization performance will degrade if the LoS paths between the LWAs and the target are blocked by obstacles. Therefore, one may select proper positions to deploy the LWAs to avoid NLoS propagation to the target to be localized. However, a LoS path between the LWAs and the WiFi AP isn't a precondition. As long as the LWAs can receive the signal from the WiFi AP, Bifrost can work.", + "bbox": [ + 511, + 750, + 915, + 859 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Lifetime and Maintenance Cost. The rated current of the LWAs is $0.86\\mathrm{mA}$ . An LWA is powered with a 1600mAh battery and works", + "bbox": [ + 513, + 863, + 911, + 891 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Bifrost", + "bbox": [ + 84, + 71, + 137, + 85 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", + "bbox": [ + 594, + 71, + 911, + 87 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "386", + "bbox": [ + 486, + 922, + 514, + 934 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "at a $20\\%$ duty cycle. So the estimated lifetime of an LWA is over 9302 hours ( $\\approx$ 387 days), and the maintenance cost is recharging the battery once every 387 days.", + "bbox": [ + 81, + 106, + 480, + 148 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Potential Interference. One may be concerned that if multiple LWAs are deployed close to each other, LWAs with the same polarization will interfere with each other. However, each room only has one RHCP LWA and one LHCP LWA in the setting of Bifrost, so LWAs with the same polarization are separated by walls. Interference signals must propagate through the wall, after which they only have low strength. 
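To make the "low strength after the wall" argument concrete, here is a rough link-budget sketch with assumed numbers (the 10 dB per-wall loss at 5 GHz is a typical figure, not a measurement from this paper):

```python
import math

def rx_dbm(tx_dbm, distance_m, walls, wall_loss_db=10.0, freq_hz=5.25e9):
    """Received power under free-space loss plus a per-wall penalty.
    The 10 dB per-wall loss at 5 GHz is an assumed typical value."""
    fspl = 20 * math.log10(4 * math.pi * distance_m * freq_hz / 3e8)
    return tx_dbm - fspl - walls * wall_loss_db

print(f"same room, 5 m:      {rx_dbm(20, 5, walls=0):.1f} dBm")
print(f"through a wall, 8 m: {rx_dbm(20, 8, walls=1):.1f} dBm")
```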
Therefore, different pairs of LWAs hardly interfere with each other.", + "bbox": [ + 81, + 151, + 480, + 261 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6 RELATED WORK", + "text_level": 1, + "bbox": [ + 81, + 277, + 256, + 292 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this section, we briefly summarize existing works in the fields related to our work.", + "bbox": [ + 81, + 297, + 480, + 325 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6.1 Application of LWA", + "text_level": 1, + "bbox": [ + 81, + 340, + 290, + 357 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The work closest to ours is 123-LOC [42], which presents a THz LWA with two perpendicular slots to radiate horizontally and vertically polarized FSDM signals. Range and angle estimation is then performed by the receiver based on the bandwidth and frequencies of the received signals. In comparison, Bifrost reduces the impact of multipath and achieves room-scale localization, which is a challenging task for THz signals.", + "bbox": [ + 81, + 359, + 483, + 455 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "LeakyTrack [21] tracks an object between two LWAs based on the observation that nodal and environmental motion changes the received spectral profile of FSDM signals. [76] investigates the security of THz networks with LWAs and shows that FSDM signals of the LWA can hinder eavesdroppers, e.g., by using a wide-band transmission. [20] and [22] study single-shot link discovery with the help of FSDM signals from the LWA. A receiver can discover the direction of the path from the transmitter in one shot. In contrast to those works, which require a specific feeding device for the THz LWA, Bifrost operates in the WiFi band and works in a plug-and-play manner, providing better applicability and convenience. Additionally, Bifrost addresses relevant challenges, including multipath, noise and ambiguity, by delicately designing the hardware and localization algorithm.", + "bbox": [ + 81, + 455, + 483, + 650 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6.2 WiFi-based Indoor Localization", + "text_level": 1, + "bbox": [ + 81, + 666, + 379, + 681 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "There have been numerous efforts on indoor localization with WiFi [16, 49, 61, 68-70, 84]. Traditional fingerprint-based techniques have been widely used by mapping the RSS readings from multiple APs to locations [46, 66]. Techniques based on AoA and ToF have become more prevalent recently. For example, ArrayTrack [69] proposes an AoA-based WiFi localization system that incorporates multiple APs and the Multiple Signal Classification (MUSIC) algorithm. SpotFi [43] proposes a MUSIC algorithm to obtain AoA and ToF simultaneously. The $M^3$ system [16] reduces the number of APs to only one by utilizing multipath signals and frequency hopping among multiple channels.", + "bbox": [ + 81, + 684, + 483, + 837 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Despite such inspiring advances, the existing proposals may disrupt the communication link between the target and the AP when the target hops between different APs or channels. 
In contrast, Bifrost does not interfere with the communication link, which", + "bbox": [ + 81, + 838, + 483, + 892 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "supplements the APs' localization ability, without compromising their communication ability.", + "bbox": [ + 513, + 106, + 913, + 133 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6.3 Polarization of the Wireless Signal", + "text_level": 1, + "bbox": [ + 513, + 145, + 841, + 162 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "LLAMA [15] designs a metasurface to mitigate polarization mismatch by rotating the polarization of wireless signals, which is achieved by applying the bias voltage to the orthogonal components", + "bbox": [ + 511, + 164, + 915, + 205 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "(like $\\overrightarrow{E_x}$ and $\\overrightarrow{E_y}$ shown in Fig. 4) of the input signals. RoS [55] and mmTag [51] propose well-designed Van Atta arrays. They all change the polarization of the input mmWave signals to the orthogonal one to deal with the self-interference between the incoming signals and the backscattered signals. IntuWition [77] observes that different materials can reflect and scatter the incoming polarized signals in different ways, based on which it classifies various materials. SiWa [83] utilizes a similar principle to inspect the wall structure without undermining the structural integrity.", + "bbox": [ + 511, + 210, + 915, + 351 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The above-mentioned works mainly focus on mutable LP signals. Bifrost instead explores the use of orthogonal CP signals, providing more robust performance.", + "bbox": [ + 511, + 351, + 915, + 393 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6.4 Backscatter-aided Localization", + "text_level": 1, + "bbox": [ + 513, + 398, + 802, + 414 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The backscatter technology [8, 26, 29-31, 37, 53, 55, 58] enables many novel applications, one of which is localization. Both Hawkeye [8] and Millimetro [58] design backscatter tags based on Van Atta arrays to enhance the energy of backscatter signals, so they can localize tags at long range (over $100\\mathrm{m}$ ). By assigning unique OOK modulation frequencies to different tags, those two works can also identify and localize tags simultaneously. Moreover, RFID technology [34-36, 39-41] has been widely used in localization tasks. As a typical backscatter technology, RFID can modulate information via the RFID tags. The RFID reader can then infer the range or orientation of the tags by analyzing the phase variation of the backscatter signals.", + "bbox": [ + 511, + 417, + 915, + 583 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Compared to those works, Bifrost utilizes tags (i.e., LWAs) to create FSDM signals to localize another target, rather than the tag itself.", + "bbox": [ + 511, + 584, + 911, + 625 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "7 CONCLUSION", + "text_level": 1, + "bbox": [ + 514, + 636, + 676, + 651 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This paper introduces Bifrost, a low-cost and plug-and-play technique to enhance the availability and accuracy of WiFi localization. 
It can either aid existing techniques to improve their performance, or operate independently to outperform the state of the art in arguably realistic indoor settings, without affecting the ongoing data communication of WiFi networks. What sets Bifrost apart from other solutions is the exploration of the polarization of wireless signals and the dispersion property of LWAs, which embodies the concept of RF computing [15, 29, 53, 55]. We plan to explore the research space further in this direction.", + "bbox": [ + 511, + 655, + 915, + 794 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 514, + 805, + 728, + 819 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We thank our anonymous shepherd and reviewers for their insightful comments. This work is partially supported by the National Natural Science Foundation of China under grant No. U21B2007, and the Guoqiang Institute of Tsinghua University under grant No. 2021GQG1002.", + "bbox": [ + 511, + 824, + 915, + 892 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", + "bbox": [ + 83, + 71, + 401, + 85 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Yimiao Sun, et al.", + "bbox": [ + 803, + 71, + 913, + 85 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "387", + "bbox": [ + 486, + 922, + 514, + 934 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 84, + 104, + 205, + 118 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] 2021. IEEE Standard for Information Technology-Telecommunications and Information Exchange between Systems Local and Metropolitan Area Networks-Specific Requirements Part 11: Wireless LAN Medium Access Control (MAC) and Physical Layer (PHY) Specifications Amendment 1: Enhancements for High-Efficiency WLAN. IEEE Std 802.11ax-2021 (Amendment to IEEE Std 802.11-2020) (2021).", + "[2] Afaz Uddin Ahmed, Reza Arablouei, Frank De Hoog, Branislav Kusy, and Raja Jurdak. 2019. Multi-radio Data Fusion for Indoor Localization Using Bluetooth and WiFi. In Proceedings of the 9th International Conference on Pervasive and Embedded Computing and Communication Systems: Volume 1: PECC.", + "[3] Amazon. 2023. Amazon NETGEAR 4-Stream WiFi 6 Router. https://www.amazon.com/NETGEAR-4-Stream-WiFi-Router-R6700AX/dp/B08KTXG8Q5/ref=sr_1_5?keywords=wifi+router&qid=1687784198&sr=8-5. (2023). Accessed: 2023-06-26.", + "[4] Amazon. 2023. Amazon Tenda AC1200 Smart WiFi Router. https://www.amazon.com/Tenda-Wireless-Internet-MU-MIMO-AC6/dp/B06X1CHFJ5/ref=sr_1_51?keywords=wifi+router&qid=1687784310&sr=8-51. (2023). Accessed: 2023-06-26.", + "[5] Amazon. 2023. Amazon TP-Link AC1200 WiFi Router. https://www.amazon.com/TP-Link-AC1200-Router-Archer-A54/dp/B09G5Y1HWZ/ref=sr_1_1?keywords=wifi+router&qid=1687784198&sr=8-1. (2023). Accessed: 2023-06-26.", + "[6] Amazon. 2023. Amazon TP-Link Smart WiFi 6 Router. https://www.amazon.com/TP-Link-Wireless-AX1500-Wifi-Router/dp/B07ZSDR49S/ref=sr_1_3?keywords=wifi+router&qid=1687784198&sr=8-3. (2023). Accessed: 2023-06-26.", + "[7] Roshan Ayyalasomayajula, Aditya Arun, Chenfeng Wu, Sanatan Sharma, Abhishek Rajkumar Sethi, Deepak Vasisht, and Dinesh Bharadia. 2020. Deep Learning Based Wireless Localization for Indoor Navigation. 
In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[8] Kang Min Bae, Hankyeol Moon, Sung-Min Sohn, and Song Min Kim. 2023. Hawkeye: Hectometer-range Subcentimeter Localization for Large-scale mmWave Backscatter. In Proceedings of the 21st Annual International Conference on Mobile Systems, Applications and Services (MobiSys).", + "[9] Atul Bansal, Akshay Gadre, Vaibhav Singh, Anthony Rowe, Bob Iannucci, and Swarun Kumar. 2021. Owll: Accurate LoRa Localization Using the TV Whitespaces. In Proceedings of the 20th International Conference on Information Processing in Sensor Networks (IPSN).", + "[10] Yuanxi Cao and Sen Yan. 2021. A Low-profile High-gain Multi-beam Antenna based on 3D-printed Cylindrical Luneburg Lens. Microwave and Optical Technology Letters 63, 7 (2021).", + "[11] Yuanxi Cao and Sen Yan. 2021. Multi-beam SIW Leaky-wave Antenna with 2-D Beam Scanning Capability for Millimeter-wave Radar Applications. International Journal of RF and Microwave Computer-aided Engineering 31, 5 (2021).", + "[12] Yuanxi Cao, Sen Yan, and Juan Chen. 2023. An SIW Pillbox-based Compact Dual-polarized Multibeam Antenna with Passive 2-D Beam Scanning Capability. IEEE Transactions on Circuits and Systems II: Express Briefs 70, 1 (2023).", + "[13] Yuanxi Cao, Sen Yan, Wendong Liu, and Jianxing Li. 2023. A Wideband Multibeam Pillbox Antenna Based on Differentially Fed Leaky-wave Array. IEEE Antennas and Wireless Propagation Letters 22, 3 (2023).", + "[14] Roberto Carvalho, Shan-Ho Yang, Yao-Hua Ho, and Ling-Jyh Chen. [n.d.]. Indoor Localization Using FM and DVB-T Signals. In Proceedings of the 2016 13th IEEE Annual Consumer Communications & Networking Conference (CCNC).", + "[15] Lili Chen, Wenjun Hu, Kyle Jamieson, Xiaojiang Chen, Dingyi Fang, and Jeremy Gummeson. [n. d.]. Pushing the Physical Limits of IoT Devices with Programmable Metasurfaces. In Proceedings of the 18th USENIX Symposium on Networked Systems Design and Implementation (NSDI).", + "[16] Zhe Chen, Guorong Zhu, Sulei Wang, Yuedong Xu, Jie Xiong, Jin Zhao, Jun Luo, and Xin Wang. 2019. $M^3$ : Multipath Assisted Wi-Fi Localization with a Single Access Point. IEEE Transactions on Mobile Computing 20, 2 (2019).", + "[17] COMFAST. 2023. CF-AX210 PRO. http://www.comfast.com.cn/index.php?m=content&c=index&a=show&catid=13&id=123. (2023). Accessed: 2023-03-17.", + "[18] Pei Du and Nirupama Bulusu. 2021. An Automated AR-based Annotation Tool for Indoor Navigation for Visually Impaired People. In Proceedings of the 23rd International ACM SIGACCESS Conference on Computers and Accessibility.", + "[19] Pei Du and Nirupama Bulusu. 2022. Indoor Navigation for Visually Impaired People with Vertex Colored Graphs. In Proceedings of the 20th Annual International Conference on Mobile Systems, Applications and Services (MobiSys).", + "[20] Yasaman Ghasempour, Rabi Shrestha, Aaron Charous, Edward Knightly, and Daniel M Mittleman. 2020. Single-shot Link Discovery for Terahertz Wireless Networks. Nature Communications 11, 1 (2020).", + "[21] Yasaman Ghasempour, Chia-Yi Yeh, Rabi Shrestha, Yasith Amarasinghe, Daniel Mittleman, and Edward W. Knightly. 2020. LeakyTrack: Non-coherent Single-antenna Nodal and Environmental Mobility Tracking with a Leaky-wave Antenna. 
In Proceedings of the 18th Conference on Embedded Networked Sensor Systems" + ], + "bbox": [ + 86, + 121, + 483, + 891 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "(SenSys).", + "[22] Yasaman Ghasempour, Chia-Yi Yeh, Rabi Shrestha, Daniel Mittleman, and Edward Knightly. 2020. Single Shot Single Antenna Path Discovery in THz Networks. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[23] Jon Gjengset, Jie Xiong, Graeme McPhillips, and Kyle Jamieson. 2014. Accurate Indoor Localization with Zero Start-up Cost. In Proceedings of the 20th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[24] Jon Gjengset, Jie Xiong, Graeme McPhillips, and Kyle Jamieson. 2014. Phaser: Enabling Phased Array Signal Processing on Commodity WiFi Access Points. In Proceedings of the 20th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[25] Baoshen Guo, Weijian Zuo, Shuai Wang, Wenjun Lyu, Zhiqing Hong, Yi Ding, Tian He, and Desheng Zhang. 2022. Wepos: Weak-supervised Indoor Positioning with Unlabeled WiFi for On-demand Delivery. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 6, 2 (2022), 1-25.", + "[26] Xiuzhen Guo, Yuan He, Zihao Yu, Jiacheng Zhang, Yunhao Liu, and Longfei Shangguan. 2022. RF-transformer: A Unified Backscatter Radio Hardware Abstraction. In Proceedings of the 28th Annual International Conference on Mobile Computing And Networking (MobiCom).", + "[27] Xiuzhen Guo, Yuan He, Xiaolong Zheng, Liangcheng Yu, and Omprakash Gnawali. 2018. ZigFi: Harnessing Channel State Information for Cross-Technology Communication. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM).", + "[28] Xiuzhen Guo, Yuan He, Xiaolong Zheng, Liangcheng Yu, and Omprakash Gnawali. 2020. ZigFi: Harnessing Channel State Information for Cross-Technology Communication. IEEE/ACM Transactions on Networking 28, 1 (2020), 301–311.", + "[29] Xiuzhen Guo, Longfei Shangguan, Yuan He, Nan Jing, Jiacheng Zhang, Haotian Jiang, and Yunhao Liu. 2022. Saiyan: Design and Implementation of a Low-power Demodulator for LoRa Backscatter Systems. In Proceedings of the 19th USENIX Symposium on Networked Systems Design and Implementation (NSDI).", + "[30] Xiuzhen Guo, Longfei Shangguan, Yuan He, Jia Zhang, Haotian Jiang, Awais Ahmad Siddiqi, and Yunhao Liu. 2020. Aloba: Rethinking ON-OFF Keying Modulation for Ambient LoRa Backscatter. In Proceedings of the 18th Conference on Embedded Networked Sensor Systems (SenSys).", + "[31] Xiuzhen Guo, Longfei Shangguan, Yuan He, Jia Zhang, Haotian Jiang, Awais Ahmad Siddiqi, and Yunhao Liu. 2021. Efficient Ambient LoRa Backscatter with On-Off Keying Modulation. IEEE/ACM Transactions on Networking 30, 2 (2021), 641-654.", + "[32] Yuan He, Weiguo Wang, Luca Mottola, Shuai Li, Yimiao Sun, Jinming Li, Hua Jing, Ting Wang, and Yulei Wang. 2023. Acoustic Localization System for Precise Drone Landing. IEEE Transactions on Mobile Computing (2023).", + "[33] David R Jackson, Christophe Caloz, and Tatsuo Itoh. 2012. Leaky-wave Antennas. Proc. IEEE 100, 7 (2012).", + "[34] Chengkun Jiang, Yuan He, Songzhen Yang, Junchen Guo, and Yunhao Liu. 2019. 3D-OmniTrack: 3D Tracking with COTS RFID Systems. In Proceedings of the 18th International Conference on Information Processing in Sensor Networks (IPSN).", + "[35] Chengkun Jiang, Yuan He, Xiaolong Zheng, and Yunhao Liu. 2018. 
Orientation-aware RFID Tracking with Centimeter-level Accuracy. In Proceedings of the 17th International Conference on Information Processing in Sensor Networks (IPSN).", + "[36] Chengkun Jiang, Yuan He, Xiaolong Zheng, and Yunhao Liu. 2019. OmniTrack: Orientation-aware RFID Tracking with Centimeter-level Accuracy. IEEE Transactions on Mobile Computing 20, 2 (2019), 634-646.", + "[37] Haotian Jiang, Jiacheng Zhang, Xiuzhen Guo, and Yuan He. 2021. Sense Me on the Ride: Accurate Mobile Sensing Over a LoRa Backscatter Channel. In Proceedings of the 19th ACM Conference on Embedded Networked Sensor Systems (SenSys).", + "[38] Zhiping Jiang, Tom H. Luan, Xincheng Ren, Dongtao Lv, Han Hao, Jing Wang, Kun Zhao, Wei Xi, Yueshen Xu, and Rui Li. 2022. Eliminating the Barriers: Demystifying Wi-Fi Baseband Design and Introducing the PicoScenes Wi-Fi Sensing Platform. IEEE Internet of Things Journal 9, 6 (2022).", + "[39] Meng Jin, Yuan He, Songzhen Yang, Yunhao Liu, Li Yan, and Yuji Sun. 2022. Versatile RFID-based Sensing: Model, Algorithm, and Applications. IEEE Transactions on Mobile Computing (2022).", + "[40] Meng Jin, Kexin Li, Xiaohua Tian, Xinbing Wang, and Chenghu Zhou. 2023. Fast, Fine-grained, and Robust Grouping of RFIDs. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[41] Meng Jin, Shun Yao, Kexin Li, Xiaohua Tian, Xinbing Wang, Chenghu Zhou, and Xinde Cao. 2022. A Passive \"Eye-in-Hand\" Camera for Miniature Robots. In Proceedings of the 20th ACM Conference on Embedded Networked Sensor Systems (SenSys).", + "[42] Atsutse Kludze, Rabi Shrestha, Chowdhury Miftah, Edward Knightly, Daniel Mittleman, and Yasaman Ghasempour. 2022. Quasi-optical 3D Localization Using Asymmetric Signatures above 100 GHz. In Proceedings of the 28th Annual International Conference on Mobile Computing And Networking (MobiCom).", + "[43] Manikanta Kotaru, Kiran Joshi, Dinesh Bharadia, and Sachin Katti. 2015. SpotFi: Decimeter Level Localization Using WiFi. In Proceedings of the 2015 ACM Conference on Special Interest Group on Data Communication (SIGCOMM)." + ], + "bbox": [ + 517, + 109, + 911, + 887 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Bifrost", + "bbox": [ + 84, + 71, + 137, + 85 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", + "bbox": [ + 594, + 71, + 911, + 85 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "388", + "bbox": [ + 486, + 922, + 514, + 934 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[44] Vikram Kumar, Reza Arablouei, Raja Jurdak, Branislav Kusy, and Neil W Bergmann. 2017. RSSI-based Self-localization with Perturbed Anchor Positions. In Proceedings of the 2017 IEEE 28th Annual International Symposium on Personal, Indoor, and Mobile Radio Communications (PIMRC).", + "[45] L-com. 2023. Circular Polarized Patch Antenna. https://www.l-com.com/wireless-antenna-24-ghz-8-dbi-circular-polarized-rh-flat-patch-antennas. (2023). Accessed: 2023-10-03.", + "[46] Danyang Li, Jingao Xu, Zheng Yang, Chenshu Wu, Jianbo Li, and Nicholas D Lane. 2021. Wireless Localization with Spatial-temporal Robust Fingerprints. ACM Transactions on Sensor Networks 18, 1 (2021), 1-23.", + "[47] Tianxiang Li, Haofan Lu, Reza Rezvani, Ali Abedi, and Omid Abari. 2022. Bringing WiFi Localization to Any WiFi Devices. 
In Proceedings of the 21st ACM Workshop on Hot Topics in Networks (HotNets).", + "[48] Tianxiang Li, Mohammad Hossein Mazaheri, and Omid Abari. 2022. 5G in the Sky: The Future of High-speed Internet via Unmanned Aerial Vehicles. In Proceedings of the 23rd Annual International Workshop on Mobile Computing Systems and Applications.", + "[49] Xiang Li, Daqing Zhang, Qin Lv, Jie Xiong, Shengjie Li, Yue Zhang, and Hong Mei. 2017. IndoTrack: Device-Free Indoor Human Tracking with Commodity Wi-Fi. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 3 (2017).", + "[50] Bo Liang, Purui Wang, Renjie Zhao, Heyu Guo, Pengyu Zhang, Junchen Guo, Shunmin Zhu, Hongqiang Harry Liu, Xinyu Zhang, and Chenren Xu. 2023. RF-Chord: Towards Deployable RFID Localization System for Logistic Networks. In Proceedings of the 20th USENIX Symposium on Networked Systems Design and Implementation (NSDI).", + "[51] Mohammad Hossein Mazaheri, Alex Chen, and Omid Abari. 2021. mmTag: A Millimeter Wave Backscatter Network. In Proceedings of the 2021 ACM SIGCOMM 2021 Conference (SIGCOMM).", + "[52] Francesco Monticone and Andrea Alu. 2015. Leaky-wave Theory, Techniques, and Applications: From Microwaves to Visible Frequencies. Proc. IEEE 103, 5 (2015).", + "[53] Xin Na, Xiuzhen Guo, Zihao Yu, Jia Zhang, Yuan He, and Yunhao Liu. 2023. Leggiero: Analog WiFi Backscatter with Payload Transparency. In Proceedings of the 21st Annual International Conference on Mobile Systems, Applications and Services (MobiSys).", + "[54] Sujay Narayana, Vijay Rao, R Venkatesha Prasad, Ajay K Kanthila, Kavya Managundi, Luca Mottola, and T Venkata Prabhakar. 2020. LOCI: Privacy-aware, Device-free, Low-power Localization of Multiple Persons Using IR Sensors. In Proceedings of the 19th International Conference on Information Processing in Sensor Networks (IPSN).", + "[55] John Nolan, Kun Qian, and Xinyu Zhang. 2021. RoS: Passive Smart Surface for Roadside-to-Vehicle Communication. In Proceedings of the 2021 ACM SIGCOMM 2021 Conference (SIGCOMM).", + "[56] Yuanchao Shu, Zhuqi Li, Borje Karlsson, Yiyong Lin, Thomas Moscibroda, and Kang Shin. [n. d.]. Incrementally-deployable Indoor Navigation with Automatic Trace Generation. In Proceedings of IEEE International Conference on Computer Communications.", + "[57] Elahe Soltanaghaei, Avinash Kalyanaraman, and Kamin Whitehouse. 2018. Multipath Triangulation: Decimeter-level WiFi Localization and Orientation with a Single Unaided Receiver. In Proceedings of the 16th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys).", + "[58] Elahe Soltanaghaei, Akarsh Prabhakara, Artur Balanuta, Matthew Anderson, Jan M Rabaey, Swarun Kumar, and Anthony Rowe. 2021. Millimetro: mmWave Retro-reflective Tags for Accurate, Long Range Localization. In Proceedings of the 27th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[59] Yimiao Sun, Weiguo Wang, Luca Mottola, Ruijin Wang, and Yuan He. 2022. AIM: Acoustic Inertial Measurement for Indoor Drone Localization and Tracking. In Proceedings of the 20th Conference on Embedded Networked Sensor Systems (SenSys).", + "[60] Huy Tran, Abhishek Mukherji, Nirupama Bulusu, Santosh Pandey, and Xu Zhang. 2019. Improving Infrastructure-based Indoor Positioning Systems with Device Motion Detection. 
In Proceedings of the 2019 IEEE International Conference on Pervasive Computing and Communications (PerCom).", + "[61] Ju Wang, Hongbo Jiang, Jie Xiong, Kyle Jamieson, Xiaojiang Chen, Dingyi Fang, and Binbin Xie. 2016. LiFS: Low Human-effort, Device-free Localization with Fine-grained Subcarrier Information. In Proceedings of the 22nd Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[62] Weiguo Wang, Yuan He, Meng Jin, Yimiao Sun, and Xiuzhen Guo. 2023. Meta-Speaker: Acoustic Source Projection by Exploiting Air Nonlinearity. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[63] Weiguo Wang, Luca Mottola, Yuan He, Jinming Li, Yimiao Sun, Shuai Li, Hua Jing, and Yulei Wang. 2022. MicNest: Long-range Instant Acoustic Localization" + ], + "bbox": [ + 84, + 108, + 483, + 864 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "of Drones in Precise Landing. In Proceedings of the 20th Conference on Embedded Networked Sensor Systems (SenSys).", + "[64] Yongyong Wei and Rong Zheng. 2020. Handling Device Heterogeneity in Wi-Fi Based Indoor Positioning Systems. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM).", + "[65] Yongyong Wei and Rong Zheng. 2021. Efficient Wi-Fi Fingerprint Crowdsourcing for Indoor Localization. IEEE Sensors Journal 22, 6 (2021), 5055-5062.", + "[66] Chenshu Wu, Jingao Xu, Zheng Yang, Nicholas D Lane, and Zuwei Yin. 2017. Gain without Pain: Accurate WiFi-based Localization Using Fingerprint Spatial Gradient. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 2 (2017), 1-19.", + "[67] Chenshu Wu, Zheng Yang, Zimu Zhou, Yunhao Liu, and Mingyan Liu. 2016. Mitigating Large Errors in WiFi-based Indoor Localization for Smartphones. IEEE Transactions on Vehicular Technology 66, 7 (2016), 6246-6257.", + "[68] Yaxiong Xie, Jie Xiong, Mo Li, and Kyle Jamieson. 2019. md-Track: Leveraging Multi-dimensionality for Passive Indoor Wi-Fi Tracking. In Proceedings of the 25th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[69] Jie Xiong and Kyle Jamieson. 2013. ArrayTrack: A Fine-grained Indoor Location System. In Proceedings of the 10th USENIX Symposium on Networked Systems Design and Implementation (NSDI).", + "[70] Jie Xiong, Karthikeyan Sundaresan, and Kyle Jamieson. 2015. ToneTrack: Leveraging Frequency-agile Radios for Time-based Indoor Wireless Localization. In Proceedings of the 21st Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[71] Feng Xu and Ke Wu. 2013. Understanding Leaky-wave Structures: A Special Form of Guided-wave Structure. IEEE Microwave Magazine 14, 5 (2013).", + "[72] Han Xu, Zheng Yang, Zimu Zhou, Ke Yi, and Chunyi Peng. [n. d.]. Tum: Towards Ubiquitous Multi-device Localization for Cross-device Interaction. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM).", + "[73] Kun Yang, Xiaolong Zheng, Jie Xiong, Liang Liu, and Huadong Ma. 2022. WiImg: Pushing the Limit of WiFi Sensing with Low Transmission Rates. In Proceedings of the 19th Annual IEEE International Conference on Sensing, Communication, and Networking (SECON).", + "[74] Yu Yang, Yi Ding, Dengpan Yuan, Guang Wang, Xiaoyang Xie, Yunhuai Liu, Tian He, and Desheng Zhang. 2020. TransLoc: Transparent Indoor Localization with Uncertain Human Participation for Instant Delivery. 
In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[75] Zheng Yang, Zimu Zhou, and Yunhao Liu. 2013. From RSSI to CSI: Indoor Localization via Channel Response. Comput. Surveys 46, 2 (2013).", + "[76] Chia-Yi Yeh, Yasaman Ghasempour, Yasith Amarasinghe, Daniel M Mittleman, and Edward W Knightly. 2020. Security in Terahertz WLANs with Leaky Wave Antennas. In Proceedings of the 13th ACM Conference on Security and Privacy in Wireless and Mobile Networks (WiSec).", + "[77] Diana Zhang, Jingxian Wang, Junsu Jang, Junbo Zhang, and Swarun Kumar. 2019. On the Feasibility of Wi-Fi Based Material Sensing. In Proceedings of the 25th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[78] Jia Zhang, Xiuzhen Guo, Haotian Jiang, Xiaolong Zheng, and Yuan He. 2020. Link Quality Estimation of Cross-technology Communication. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM). 496-505.", + "[79] Jia Zhang, Xin Na, Rui Xi, Yimiao Sun, and Yuan He. 2023. mmHawkeye: Passive UAV Detection with a COTS mmWave Radar. In Proceedings of the 20th Annual IEEE International Conference on Sensing, Communication, and Networking (SECON).", + "[80] Jia Zhang, Rui Xi, Yuan He, Yimiao Sun, Xiuzhen Guo, Weiguo Wang, Xin Na, Yunhao Liu, Zhenguo Shi, and Tao Gu. 2023. A Survey of mmWave-based Human Sensing: Technology, Platforms and Applications. IEEE Communications Surveys & Tutorials (2023).", + "[81] Xianan Zhang, Wei Wang, Xuedou Xiao, Hang Yang, Xinyu Zhang, and Tao Jiang. 2020. Peer-to-Peer Localization for Single-antenna Devices. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 4, 3 (2020), 1-25.", + "[82] Zhenyong Zhang, Shibo He, Yuanchao Shu, and Zhiguo Shi. 2019. A Self-evolving WiFi-based Indoor Navigation System Using Smartphones. IEEE Transactions on Mobile Computing 19, 8 (2019), 1760-1774.", + "[83] Tianyue Zheng, Zhe Chen, Jun Luo, Lin Ke, Chaoyang Zhao, and Yaowen Yang. 2021. SiWa: See into Walls via Deep UWB Radar. In Proceedings of the 27th Annual International Conference on Mobile Computing and Networking (MobiCom).", + "[84] Xiaolong Zheng, Jiliang Wang, Longfei Shangguan, Zimu Zhou, and Yunhao Liu. 2016. Smokey: Ubiquitous Smoking Detection with Commercial WiFi Infrastructures. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM)." 
+ ], + "bbox": [ + 517, + 108, + 911, + 837 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye", + "bbox": [ + 83, + 71, + 401, + 85 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Yimiao Sun, et al.", + "bbox": [ + 805, + 71, + 911, + 85 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "389", + "bbox": [ + 486, + 922, + 514, + 934 + ], + "page_idx": 13 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_model.json b/data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5436bcca0fdfa0d019c06c8a75b8ffd24a67f9b4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_model.json @@ -0,0 +1,4551 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.177, + 0.095, + 0.82, + 0.143 + ], + "angle": 0, + "content": "Bifrost: Reinventing WiFi Signals Based on Dispersion Effect for Accurate Indoor Localization" + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.158, + 0.858, + 0.175 + ], + "angle": 0, + "content": "Yimiao Sun, Yuan He*, Jiacheng Zhang, Xin Na, Yande Chen, Weiguo Wang, Xiuzhen Guo" + }, + { + "type": "text", + "bbox": [ + 0.318, + 0.176, + 0.681, + 0.191 + ], + "angle": 0, + "content": "School of Software and BNrist, Tsinghua University" + }, + { + "type": "text", + "bbox": [ + 0.31, + 0.192, + 0.69, + 0.206 + ], + "angle": 0, + "content": "sym21@mails.tsinghua.edu.cn, heyuan@tsinghua.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.207, + 0.776, + 0.221 + ], + "angle": 0, + "content": "{zhangjc21,nx20,cyd22,wwg18}@mails.tsinghua.edu.cn, guoxiuzhen94@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.23, + 0.189, + 0.244 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.249, + 0.485, + 0.594 + ], + "angle": 0, + "content": "WiFi-based device localization is a key enabling technology for smart applications, which has attracted numerous research studies in the past decade. Most of the existing approaches rely on Line-of-Sight (LoS) signals to work, while a critical problem is often neglected: In the real-world indoor environments, WiFi signals are everywhere, but very few of them are usable for accurate localization. As a result, the localization accuracy in practice is far from being satisfactory. This paper presents Bifrost, a novel hardware-software co-design for accurate indoor localization. The core idea of Bifrost is to reinvent WiFi signals, so as to provide sufficient LoS signals for localization. This is realized by exploiting the dispersion effect of signals emitted by the leaky wave antenna (LWA). We present a low-cost plug-in design of LWA that can generate orthogonal polarized signals: On one hand, LWA disperses signals of different frequencies to different angles, thus providing Angle-of-Arrival (AoA) information for the localized target. On the other hand, the target further leverages the antenna polarization mismatch to distinguish AoAs from different LWAs. In the software layer, fine-grained information in Channel State Information (CSI) is exploited to cope with multipath and noise. We implement Bifrost and evaluate its performance under various settings. The results show that the median localization error of Bifrost is \\(0.81\\mathrm{m}\\), which is \\(52.35\\%\\) less than that of SpotFi, a state-of-the-art approach. 
SpotFi, when combined with Bifrost to work in the realistic settings, can reduce the localization error by \\(33.54\\%\\)." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.608, + 0.219, + 0.621 + ], + "angle": 0, + "content": "CCS CONCEPTS" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.628, + 0.48, + 0.655 + ], + "angle": 0, + "content": "- Networks \\(\\rightarrow\\) Location based services; \\(\\cdot\\) Information systems \\(\\rightarrow\\) Location based services;" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.668, + 0.197, + 0.682 + ], + "angle": 0, + "content": "KEYWORDS" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.687, + 0.482, + 0.715 + ], + "angle": 0, + "content": "WiFi Localization, Indoor Localization, Leaky Wave Antenna, RF Computing" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.731, + 0.265, + 0.744 + ], + "angle": 0, + "content": "\\(^{\\dagger}\\)Yuan He is the corresponding author." + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.784, + 0.212, + 0.817 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.82, + 0.482, + 0.844 + ], + "angle": 0, + "content": "This work is licensed under a Creative Commons Attribution International 4.0 License." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.845, + 0.324, + 0.855 + ], + "angle": 0, + "content": "SenSys'23,November 12-17,2023,Istanbul,Turkiye" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.856, + 0.315, + 0.865 + ], + "angle": 0, + "content": "© 2023 Copyright held by the owner/author(s)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.866, + 0.288, + 0.875 + ], + "angle": 0, + "content": "ACM ISBN 979-8-4007-0414-7/23/11...$15.00" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.876, + 0.293, + 0.886 + ], + "angle": 0, + "content": "https://doi.org/10.1145/3625687.3625786" + }, + { + "type": "image", + "bbox": [ + 0.557, + 0.228, + 0.868, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.675, + 0.367, + 0.692, + 0.378 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.553, + 0.388, + 0.842, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.675, + 0.53, + 0.694, + 0.541 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.545, + 0.914, + 0.583 + ], + "angle": 0, + "content": "Figure 1: A model-driven method works well when (a) sufficient LoS signals are available but becomes inaccurate when (b) NLoS signals have to be used." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.602, + 0.662, + 0.613 + ], + "angle": 0, + "content": "ACM Reference Format:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.614, + 0.916, + 0.689 + ], + "angle": 0, + "content": "Yimiao Sun, Yuan He, Jiacheng Zhang, Xin Na, Yande Chen, Weiguo Wang, Xiuzhen Guo. 2023. Bifrost: Reinventing WiFi Signals Based on Dispersion Effect for Accurate Indoor Localization. In The 21st ACM Conference on Embedded Networked Sensor Systems (SenSys '23), November 12-17, 2023, Istanbul, Türkiye. ACM, New York, NY, USA, 14 pages. 
https://doi.org/10.1145/3625687.3625786" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.702, + 0.699, + 0.716 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.72, + 0.915, + 0.844 + ], + "angle": 0, + "content": "Location information [32, 63, 79, 80] is crucial, especially for smart indoor applications [50, 60, 67, 72], such as smart home [54, 62], indoor navigation [7, 18, 19, 59] and so on. Due to the ubiquitous deployment of WiFi access points (APs) and wide availability of WiFi modules on the devices, WiFi-based localization [16, 25, 49, 56, 57, 61, 64, 65, 68-70, 73, 74, 82] appears to be promising for indoor localization. The existing works of WiFi-based indoor localization can be broadly grouped into two categories, data-driven methods and model-driven methods." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.845, + 0.915, + 0.887 + ], + "angle": 0, + "content": "Data-driven methods are typically represented by fingerprint [14, 44, 61]. These methods need to collect Received Signal Strength (RSS) or CSI at different places to construct a database mapping RSS" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.923, + 0.516, + 0.935 + ], + "angle": 0, + "content": "376" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.073, + 0.402, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "header", + "bbox": [ + 0.805, + 0.073, + 0.914, + 0.086 + ], + "angle": 0, + "content": "Yimiao Sun, et al." + }, + { + "type": "image", + "bbox": [ + 0.123, + 0.103, + 0.264, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.129, + 0.225, + 0.259, + 0.238 + ], + "angle": 0, + "content": "(a) Library (48 rooms)" + }, + { + "type": "image", + "bbox": [ + 0.302, + 0.103, + 0.443, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.321, + 0.225, + 0.441, + 0.237 + ], + "angle": 0, + "content": "(b) Office (54 rooms)" + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.243, + 0.481, + 0.269 + ], + "angle": 0, + "content": "Figure 2: The number of LoS APs in each room in a library and an office building." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.289, + 0.483, + 0.317 + ], + "angle": 0, + "content": "(or CSI) with locations, which is a labor-intensive process. Also, their performance may be vulnerable to dynamic environments." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.317, + 0.483, + 0.565 + ], + "angle": 0, + "content": "Model-driven methods induce less labor cost and attract more research studies. Generally, a model-driven method calculates the location by estimating signals' Angle-of-Arrival (AoA) [2, 23, 24, 69], Time-of-Flight (ToF) [70, 81] or both [9, 16, 43]. Most of the existing approaches rely on Line-of-Sight (LoS) signals to work, as Fig. 1(a) illustrates, while a critical problem is often neglected: In the real-world indoor environments, WiFi signals are everywhere, but very few of them are usable for accurate localization. As an example to validate this finding, Fig. 2 plots the statistics of the real deployment of WiFi APs in a library (48 rooms) and an office building (54 rooms). The data shows that in nearly half of all the rooms, there is not even one LoS AP available. The rooms with sufficient LoS signals account for less than \\(5\\%\\) of all the rooms. 
In other words, the chance for a WiFi device to receive sufficient LoS WiFi AP signals, namely the case for it to be accurately localized by using an existing approach, is less than \\(5\\%\\). That well explains why the practical performance of using the existing localization approaches is far from being satisfactory." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.566, + 0.483, + 0.688 + ], + "angle": 0, + "content": "A straightforward idea to address the above problem is to increase the number of deployed WiFi APs, until everywhere is covered by at least 3 LoS APs. It isn't practical, however. Taking the library and office building investigated in Fig. 2 as an example, typically there are 50 rooms in a building. Covering every room with 3 APs requires 150 APs to be deployed, which means multiple drawbacks, such as substantial deployment cost of cables (connecting the APs), overly crowded wireless spectrum, and frequent interference and collisions in the wireless communication." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.689, + 0.483, + 0.829 + ], + "angle": 0, + "content": "This paper presents a novel approach called Bifrost, a plug-and-play and cost-effective scheme to significantly enhance the availability of LoS WiFi signals and in turn the localization accuracy. In light of the research progress on leaky wave antenna (LWA) in recent years [21, 22, 42, 47, 48, 76], Bifrost exploits dispersion effect of wireless signals [33]. Deployed in the space covered by WiFi signals, a LWA can receive those signals and then radiate them at different frequencies towards different directions, exhibiting frequency and spatial division multiplexing (FSDM) features, as is reinventing2 WiFi signals." + }, + { + "type": "image", + "bbox": [ + 0.536, + 0.104, + 0.891, + 0.369 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.577, + 0.375, + 0.85, + 0.389 + ], + "angle": 0, + "content": "Figure 3: The high-level principle of Bifrost." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.425, + 0.914, + 0.521 + ], + "angle": 0, + "content": "Fig. 3 illustrates the high-level principle of Bifrost. To localize a target device, Bifrost uses two LWAs to transform WiFi signals into FSDM signals, so the target device will receive two LoS FSDM signals with a unique pair of frequencies. Since the frequency and the propagation direction of FSDM signals are coupled, the target device can estimate its AoAs to both LWAs by analyzing the received spectrum and then calculate its location." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.522, + 0.914, + 0.551 + ], + "angle": 0, + "content": "Compared with using WiFi APs, using LWA to assist localization offers the following two distinct advantages:" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.553, + 0.915, + 0.609 + ], + "angle": 0, + "content": "1) Cost-effective. The cost of a LWA in Bifrost is 7.41 USD (4.36 USD for the material cost and 3.05 USD for the control module), which is significantly lower than that of a WiFi AP (typically \\(30 \\sim 100\\) USD [3-6])." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.612, + 0.914, + 0.654 + ], + "angle": 0, + "content": "2) Easy to Use. Deploying a LWA is very convenient. It can operate in a plug-and-play manner without the need for connecting power cables." 
+ }, + { + "type": "list", + "bbox": [ + 0.517, + 0.553, + 0.915, + 0.654 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.655, + 0.914, + 0.721 + ], + "angle": 0, + "content": "Leveraging these two advantages, Bifrost can be easily implemented in any environment with WiFi coverage, no matter whether the WiFi signals are LoS or not. Bifrost can either work independently, or cooperatively with other conventional WiFi-based localization methods." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.722, + 0.914, + 0.749 + ], + "angle": 0, + "content": "The design of Bifrost tackles several critical challenges, which are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.753, + 0.915, + 0.891 + ], + "angle": 0, + "content": "Ambiguity between Different LWAs. As Fig. 3 shows, a target device may receive signals from two LWAs, which are reinvented from the same WiFi signal source. Without a special design, it is almost impossible for the target to distinguish one LWA from the other. To overcome this problem, the LWAs in Bifrost are designed to generate orthogonal circular polarized (CP) signals, so that they won't mix up with each other (§3.1). Polarization of LWA signals can be conveniently switched by altering the input port of WiFi signals, without the need for reconstruction or modifications to the LWA's structure." + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.84, + 0.483, + 0.893 + ], + "angle": 0, + "content": "1 In Norse mythology, Bifrost is a rainbow bridge that reaches between Midgard (Earth) and Asgard (the realm of gods). \n2 The word \"reinventing\" means that Bifrost makes WiFi signals look different from their original form by using the LWAs. The signal emitted by the LWAs has two new properties, dispersion effect and circular polarization." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.923, + 0.515, + 0.935 + ], + "angle": 0, + "content": "377" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.139, + 0.086 + ], + "angle": 0, + "content": "Bifrost" + }, + { + "type": "header", + "bbox": [ + 0.594, + 0.072, + 0.914, + 0.088 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.104, + 0.278, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.097, + 0.222, + 0.264, + 0.236 + ], + "angle": 0, + "content": "(a) Linear polarization (LP)." + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.104, + 0.508, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.222, + 0.491, + 0.236 + ], + "angle": 0, + "content": "(b) Circular polarization (CP)." + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.104, + 0.73, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.542, + 0.222, + 0.724, + 0.237 + ], + "angle": 0, + "content": "(c) Elliptical polarization (EP)." + }, + { + "type": "image", + "bbox": [ + 0.731, + 0.104, + 0.892, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.757, + 0.222, + 0.894, + 0.236 + ], + "angle": 0, + "content": "(d) CP signal synthesis." + }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.247, + 0.68, + 0.261 + ], + "angle": 0, + "content": "Figure 4: The properties of polarized electromagnetic waves." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.282, + 0.484, + 0.42 + ], + "angle": 0, + "content": "Signal Extraction from the Interfered Frequency Band. Since FSDM signals radiated by LWAs are transformed from existing WiFi signals, the two types of signals operate within the same frequency band and can be simultaneously received by a target device. Directly using such signals leads to erroneous AoA estimation. To deal with such interference, LWAs in Bifrost work in a duty-cycled manner. The target device is able to detect distinctive variation of the signal amplitude at the frequencies of FSDM signals (§3.3). By analyzing WiFi CSI, the target device can effectively extract the desired FSDM signals from the interfered frequency band." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.422, + 0.484, + 0.563 + ], + "angle": 0, + "content": "Indoor Multipath Effect. The multipath effect in the indoor environment may seriously affect the quality of the received FSDM signals and further affect the localization accuracy. In order to identify FSDM signals propagating along the LoS path, Bifrost operates in two steps. First, we map frequencies of FSDM signals with subcarriers in CSI and cluster adjacent subcarriers to only retain the cluster with the highest energy (§3.4). Second, we take the intersection of two clusters (corresponding to the two orthogonal CP signals), and determine the final frequency by weighting the center frequency of the remaining clustered subcarriers (§3.5)." + }, + { + "type": "text", + "bbox": [ + 0.099, + 0.563, + 0.399, + 0.576 + ], + "angle": 0, + "content": "Our contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.579, + 0.483, + 0.649 + ], + "angle": 0, + "content": "1) We tackle a significant problem, namely the limited availability of LoS signals, which is overlooked by the existing works on WiFi-based indoor localization. We reinvent WiFi signals by exploiting the dispersion effect, which represents a new direction of utilizing LWAs." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.652, + 0.482, + 0.707 + ], + "angle": 0, + "content": "2) We address a series of non-trivial challenges, such as signal ambiguity, interference, and multipath effect, etc. The design of Bifrost effectively ensures the quality of signals used for localization." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.711, + 0.483, + 0.794 + ], + "angle": 0, + "content": "3) We implement Bifrost and evaluate its performance under various settings. The results show that the median localization error of Bifrost is \\(0.81\\mathrm{m}\\), which is \\(52.35\\%\\) less than that of SpotFi, a state-of-the-art approach. SpotFi, when combined with Bifrost to work in the realistic settings, can reduce the localization error by \\(33.54\\%\\)." + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.579, + 0.483, + 0.794 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.795, + 0.484, + 0.877 + ], + "angle": 0, + "content": "This paper proceeds as follows: §2 introduces background knowledge on the signal polarization and the LWA. Then §3 unfolds the design of Bifrost in both hardware and software. The implementation and evaluation results are presented in §4. We discuss practical issues in §5 and summarize related works in §6. This work is concluded in §7." 
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.281, + 0.62, + 0.294 + ], + "angle": 0, + "content": "2 PRIMER" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.299, + 0.915, + 0.327 + ], + "angle": 0, + "content": "This section introduces preliminary knowledge of our work: polarization of wireless signals and leaky wave antenna." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.34, + 0.712, + 0.356 + ], + "angle": 0, + "content": "2.1 Signal Polarization" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.358, + 0.912, + 0.4 + ], + "angle": 0, + "content": "Polarization is a fundamental property of wireless signals, including FSDM and WiFi signals investigated in this work. It represents the direction of the signal's electric field, which can be denoted as \\(\\vec{E}\\)" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.404, + 0.911, + 0.461 + ], + "angle": 0, + "content": "and can be decomposed into the horizontal component \\(\\overrightarrow{E_x}\\) and the vertical component \\(\\overrightarrow{E_y}\\). There will be a phase difference \\(\\Delta \\phi \\in [0, \\pi)\\) between these two orthogonal components, leading to the following elliptic equation" + }, + { + "type": "equation", + "bbox": [ + 0.552, + 0.468, + 0.872, + 0.506 + ], + "angle": 0, + "content": "\\[\n\\left(\\frac{\\overrightarrow{E_x}}{E_{x0}}\\right)^{2} + \\left(\\frac{\\overrightarrow{E_y}}{E_{y0}}\\right)^{2} - \\frac{2 \\overrightarrow{E_x} \\overrightarrow{E_y}}{E_{x0} E_{y0}} \\cos (\\Delta \\phi) = \\sin^{2} (\\Delta \\phi), \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.504, + 0.91, + 0.533 + ], + "angle": 0, + "content": "where \\(E_{x0}\\) and \\(E_{y0}\\) are amplitudes of \\(\\overrightarrow{E_x}\\) and \\(\\overrightarrow{E_y}\\). According to the" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.534, + 0.91, + 0.563 + ], + "angle": 0, + "content": "value of \\(\\Delta \\phi\\), the polarization of \\(\\overrightarrow{E}\\) can be divided into the following three categories:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.566, + 0.912, + 0.621 + ], + "angle": 0, + "content": "When \\(\\Delta \\phi = 0\\) or \\(\\pi\\): we have \\(\\overrightarrow{E_y} = \\frac{E_{y0}}{\\pm E_{x0}}\\overrightarrow{E_x}\\), so the signal is linear polarized (LP), as shown in Fig. 4(a). The polarization direction hinges on \\(\\pm \\frac{E_{y0}}{E_{x0}}\\), the ratio of \\(\\overrightarrow{E_x}\\) and \\(\\overrightarrow{E_y}\\)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.622, + 0.915, + 0.722 + ], + "angle": 0, + "content": "When \\(\\Delta \\phi = \\pm \\frac{\\pi}{2}\\): we have \\(\\overrightarrow{E_x}^{2} + \\overrightarrow{E_y}^{2} = \\vec{E}^{2}\\), and now the signal is circular polarized (CP), as Fig. 4(b) illustrates. Besides, Fig. 4(d) provides another perspective on how the CP signal is decomposed into two LP signals. Depending on whether \\(\\Delta \\phi\\) is positive or negative, the rotation direction of the CP signal is in either left-hand circular polarization (LHCP) or right-hand circular polarization (RHCP), which are orthogonal and won't interfere with each other." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.725, + 0.915, + 0.768 + ], + "angle": 0, + "content": "When \\(\\Delta \\phi\\) is Other Values: the signal is elliptical polarized (EP), as Fig. 4(c) depicts. Similar to the CP signal, the EP signal also can be divided into left-hand or right-hand."
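A minimal numeric sketch of the polarization model above, assuming two synthetic LP components with amplitudes `Ex0`, `Ey0` and phase offset `dphi` (all made-up values): it checks that the components satisfy the elliptic relation of Eq. (1), and that `dphi = pi/2` with equal amplitudes traces the constant-magnitude trajectory of a CP signal.

```python
import numpy as np

# Two orthogonal LP field components sampled over one carrier period.
Ex0, Ey0, dphi = 1.0, 1.0, np.pi / 2   # equal amplitudes + 90 deg -> CP
t = np.linspace(0, 2 * np.pi, 1000)

Ex = Ex0 * np.cos(t)
Ey = Ey0 * np.cos(t + dphi)

# Left-hand side of Eq. (1); it should equal sin^2(dphi) at every instant.
lhs = (Ex / Ex0) ** 2 + (Ey / Ey0) ** 2 - 2 * Ex * Ey / (Ex0 * Ey0) * np.cos(dphi)
assert np.allclose(lhs, np.sin(dphi) ** 2)

# dphi = pi/2 traces a circle (CP): |E|^2 is constant. dphi = 0 collapses
# the trajectory to a line (LP); other values give an ellipse (EP).
print("peak-to-peak of |E|^2:", np.ptp(Ex ** 2 + Ey ** 2))  # ~0 for CP
```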
+ }, + { + "type": "text", + "bbox": [ + 0.514, + 0.77, + 0.912, + 0.853 + ], + "angle": 0, + "content": "Impact of Polarization on the Rx: The polarization of a signal is accorded with that of its transmitting antenna but may change during propagation. To ensure effective reception, it should match the polarization of the receiving antenna, partially at least. Fig. 5 illustrates how polarization mismatch affects the received signal strength (RSS)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.853, + 0.912, + 0.895 + ], + "angle": 0, + "content": "For the LP signal and antenna, RSS decreases as the angle of these two polarization directions increases from \\(0^{\\circ}\\) to \\(90^{\\circ}\\). For the CP signal, the signal can be decomposed into two orthogonal LP" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.923, + 0.516, + 0.935 + ], + "angle": 0, + "content": "378" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.073, + 0.402, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "header", + "bbox": [ + 0.805, + 0.073, + 0.914, + 0.086 + ], + "angle": 0, + "content": "Yimiao Sun, et al." + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.104, + 0.483, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.364, + 0.483, + 0.389 + ], + "angle": 0, + "content": "Figure 5: RSS variation according to the polarization of signals and Rx." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.415, + 0.483, + 0.525 + ], + "angle": 0, + "content": "signals. Thus, the LP antenna can only receive the component whose polarization direction is parallel to itself but loses half of the signal energy. Similarly, the CP antenna can only receive half of the LP signal's energy. However, when an LHCP antenna is used to receive RHCP signals or vice versa, RSS is theoretically zero because these two polarizations are orthogonal. That is the reason why Bifrost can eliminate the ambiguity of two FSDM signals radiated from different LWAs." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.538, + 0.303, + 0.554 + ], + "angle": 0, + "content": "2.2 Leaky Wave Antenna" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.557, + 0.485, + 0.641 + ], + "angle": 0, + "content": "LWA belongs to the class of traveling-wave antennas, where the propagating wave inside the antenna structure can \"leak\" (i.e., radiate) from the waveguide to the free space, hence the name. It can distinctively couple the leaky wave's frequency and radiation direction to produce a frequency and spatial division multiplexing (FSDM) signal, as shown in Fig. 6. Specifically, direction of the" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.646, + 0.412, + 0.673 + ], + "angle": 0, + "content": "signal \\(\\overrightarrow{E_f}\\) with frequency \\(f\\) can be determined by [71]:" + }, + { + "type": "equation", + "bbox": [ + 0.21, + 0.673, + 0.482, + 0.693 + ], + "angle": 0, + "content": "\\[\n\\theta (f) = \\arccos \\frac{\\beta(f)}{k_{0}(f)}, \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.7, + 0.483, + 0.729 + ], + "angle": 0, + "content": "where \\(\\beta (f)\\) and \\(k_{0}(f)\\) are the phase constant along the LWA and the propagation constant in the free space w.r.t. \\(\\overrightarrow{E_f}\\) [52]."
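The frequency-to-angle coupling of Eq. (2) can be sketched as follows. The linear `beta_assumed` profile is only a stand-in, tuned to reproduce the 22-44 degree sweep reported later for the fabricated CPLWA; a real phase-constant profile would come from the antenna geometry (e.g., an HFSS frequency sweep).

```python
import numpy as np

C = 299_792_458.0  # speed of light, m/s

def beam_angle_deg(f_hz, beta_of_f):
    # Eq. (2): theta(f) = arccos(beta(f) / k0(f)), returned in degrees.
    k0 = 2 * np.pi * f_hz / C              # free-space propagation constant
    return np.degrees(np.arccos(beta_of_f(f_hz) / k0))

def beta_assumed(f_hz):
    # Placeholder dispersion profile: linear interpolation chosen so the
    # beam moves from ~44 deg to ~22 deg across 5.17-5.33 GHz.
    f0, f1 = 5.17e9, 5.33e9
    b0 = (2 * np.pi * f0 / C) * np.cos(np.radians(44.0))
    b1 = (2 * np.pi * f1 / C) * np.cos(np.radians(22.0))
    return b0 + (b1 - b0) * (f_hz - f0) / (f1 - f0)

for f in (5.17e9, 5.25e9, 5.33e9):
    print(f"{f / 1e9:.2f} GHz -> {beam_angle_deg(f, beta_assumed):.1f} deg")
```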
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.729, + 0.483, + 0.867 + ], + "angle": 0, + "content": "Currently, two main types of LWAs have been extensively studied. 1) The uniform LWA, which employs a metallic waveguide with a slit cut along its length [21, 22, 42, 76], as depicted in Fig. 6(b). The FSDM signal leaked from a uniform LWA can only propagate towards the forward region (i.e., \\(\\theta \\in (0^{\\circ}, 90^{\\circ})\\)). 2) The periodic LWA, which is typically designed using a dielectric substrate with a periodic array of metal strips (i.e., slots) [10-13] and similar to an antenna array, as shown in Fig. 6(a). The FSDM signal of this type of LWA can propagate towards both forward and backward regions (i.e., \\(\\theta \\in (0^{\\circ}, 180^{\\circ})\\)) [33]." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.867, + 0.48, + 0.895 + ], + "angle": 0, + "content": "Periodic LWA has been widely studied in recent research due to its versatile slot design and low-cost fabrication using the printed" + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.104, + 0.912, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.558, + 0.235, + 0.868, + 0.249 + ], + "angle": 0, + "content": "Figure 6: Typical structures of leaky wave antenna3." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.274, + 0.915, + 0.316 + ], + "angle": 0, + "content": "circuit board (PCB) process. These attributes have made it a popular choice in various applications. Bifrost also employs the periodic structure to produce circular polarized signals." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.328, + 0.627, + 0.342 + ], + "angle": 0, + "content": "3 BIFROST" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.345, + 0.915, + 0.402 + ], + "angle": 0, + "content": "In this section, we first articulate how to design the circular polarized LWA (i.e., CPLWA) to transform the input LP signal into the CP signal with the FSDM feature. Then, we present details of our approach of localization with the CPLWA." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.414, + 0.684, + 0.431 + ], + "angle": 0, + "content": "3.1 CPLWA Design" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.433, + 0.915, + 0.605 + ], + "angle": 0, + "content": "Unlike many traditional LWAs [10, 13, 21, 22], Bifrost utilizes \\(\\mathrm{CP^4}\\) (i.e., RHCP and LHCP) to distinguish different LWAs and corresponding FSDM signals. We specially design a CPLWA that can generate both LHCP and RHCP signals. As shown in Fig. 7(a), our CPLWA has both vertical and horizontal slots to generate orthogonal LP signals, and further to form the CP signal (the bifurcation is designed for performance optimization). According to Eq. (1), a \\(\\frac{\\pi}{2}\\) phase difference between two LP signals is necessary to generate the CP signal, and this is achieved by adjusting the length of the slots. Denoting the guided wavelength of the substrate material at \\(5.25\\mathrm{GHz}\\) as \\(\\lambda_{g}\\), the distance between the center of the horizontal and the vertical slots is \\(\\frac{\\lambda_g}{4}\\)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.604, + 0.915, + 0.742 + ], + "angle": 0, + "content": "In the fabrication process of CPLWA, we adopt a two-layer copper-clad substrate structure, as shown in Fig. 7(b). The substrate material is F4BM-2, whose permittivity \\(\\epsilon = 3.02\\). The top and bottom layers of the substrate consist of copper and have undergone tin immersion plating to prevent oxidation.
The bottom layer of copper functions as the ground, and the shorting vias are incorporated to penetrate the substrate, connecting the top and bottom layers in order to ground the top layer. These shorting vias are periodically arranged on the upper and lower boundaries of the substrate and the patch." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.742, + 0.916, + 0.84 + ], + "angle": 0, + "content": "The final structure of our proposed CPLWA is depicted in Fig. 7(c), where multiple units are linearly arranged together to enhance the directivity of the FSDM signal, which is similar to the antenna array. Note that a CPLWA is composed of 6 units as an illustration, but 11 units are arranged in practice. This CPLWA features two ports on both ends: one is the feed port that connects to an LP antenna for absorbing the WiFi signal, and the other should connect to a" + }, + { + "type": "page_footnote", + "bbox": [ + 0.514, + 0.851, + 0.915, + 0.882 + ], + "angle": 0, + "content": "It is worth noting that the 2D radiation pattern is used here for illustration purposes. In reality, the radiation pattern of the leaky wave with a specific frequency is more like a cone, with a generatrix along the propagation direction of the traveling wave." + }, + { + "type": "page_footnote", + "bbox": [ + 0.516, + 0.882, + 0.888, + 0.894 + ], + "angle": 0, + "content": "4Unless otherwise specified, CP signals stand for both RHCP and LHCP signals." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.851, + 0.915, + 0.894 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.923, + 0.515, + 0.935 + ], + "angle": 0, + "content": "379" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.138, + 0.086 + ], + "angle": 0, + "content": "Bifrost" + }, + { + "type": "header", + "bbox": [ + 0.595, + 0.073, + 0.913, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "image", + "bbox": [ + 0.124, + 0.104, + 0.275, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.129, + 0.209, + 0.248, + 0.22 + ], + "angle": 0, + "content": "(a) Unit of the LWA." + }, + { + "type": "image", + "bbox": [ + 0.286, + 0.109, + 0.442, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.306, + 0.209, + 0.433, + 0.221 + ], + "angle": 0, + "content": "(b) Layered structure." + }, + { + "type": "image", + "bbox": [ + 0.089, + 0.228, + 0.479, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.211, + 0.296, + 0.331, + 0.309 + ], + "angle": 0, + "content": "(c) Complete design." + }, + { + "type": "image_caption", + "bbox": [ + 0.131, + 0.314, + 0.433, + 0.328 + ], + "angle": 0, + "content": "Figure 7: General view of CPLWA used in Bifrost." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.353, + 0.482, + 0.422 + ], + "angle": 0, + "content": "matched \\(50\\Omega\\) load. By changing the signal feed port, polarization of the FSDM signal can switch between LHCP and RHCP. If the input signal has gone through all slots and reached the other end, yet still has energy remaining, the matched load will absorb the excess signal." 
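As a back-of-the-envelope check of the \(\lambda_g/4\) slot offset, a rough sketch that approximates the guided wavelength with the bulk permittivity of the F4BM-2 substrate (the real effective permittivity also depends on the trace geometry):

```python
import math

c = 299_792_458.0   # speed of light, m/s
f = 5.25e9          # design frequency, Hz
eps_r = 3.02        # F4BM-2 relative permittivity

# Approximate guided wavelength: free-space wavelength shortened by sqrt(eps_r).
lambda_g = c / (f * math.sqrt(eps_r))
print(f"lambda_g ~ {lambda_g * 1e3:.1f} mm")                     # ~32.9 mm
print(f"slot offset lambda_g/4 ~ {lambda_g / 4 * 1e3:.2f} mm")   # ~8.2 mm
```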
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.423, + 0.482, + 0.478 + ], + "angle": 0, + "content": "The CPLWA used in Bifrost is specially designed at 5.17GHz-5.33GHz WiFi band, while this structure and design methodology are universally applicable for other frequencies and bandwidths by properly modifying the relevant parameters." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.479, + 0.483, + 0.644 + ], + "angle": 0, + "content": "Now we conduct a quick validation to show the key performance of the proposed CPLWA using ANSYS HFSS. Firstly, the direction of the FSDM signal w.r.t. different frequencies is depicted in Fig. 8(a). There is a total \\( 22^{\\circ} \\) field of view (FoV) across the operating frequency band (5.17GHz-5.33GHz). Note that when the LP signal is fed into the right port or left port, the RHCP or LHCP signal will be radiated from \\( 22^{\\circ} \\) to \\( 44^{\\circ} \\) or \\( 136^{\\circ} \\) to \\( 158^{\\circ} \\), respectively. Fig. 8(b) shows the energy distribution of signals at five different frequencies. It is evident that the energy of the leaky signal concentrates on the correct direction, and their realized gains are all above 11.5dB. Therefore, the direction can be easily identified by examining the energy distribution of signals." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.644, + 0.483, + 0.672 + ], + "angle": 0, + "content": "With the proposed CPLWA, we will proceed with elaborating on the core localization algorithm in Bifrost." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.685, + 0.328, + 0.699 + ], + "angle": 0, + "content": "3.2 Basic Localization Model" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.703, + 0.483, + 0.87 + ], + "angle": 0, + "content": "Let \\( S_{l} \\) and \\( S_{r} \\) respectively denote LHCP and RHCP signals that propagate from corresponding LWAs to the target via the LoS paths. The frequencies of these two signals, \\( f_{l} \\) and \\( f_{r} \\), are what we desire for calculating the location. Recall that \\( S_{l} \\) and \\( S_{r} \\) are featured in frequency and space division multiplexing (FSDM) and orthogonal \\( \\mathrm{CP}^5 \\), so these two signals won't interfere with each other. As a result, the target can estimate its relative direction to both LWAs based on the received spectrum and the radiation pattern of the two LWAs. Further, given locations of two LWAs, \\( L_{r} = (x_{r}, y_{r}, z_{r}) \\) of the RHCP LWA and \\( L_{l} = (x_{l}, y_{l}, z_{l}) \\) of the LHCP LWA, the target can output its absolute location. In detail, as we mentioned in §2, the radiation pattern of the LWA is a conical" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.109, + 0.721, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.539, + 0.253, + 0.685, + 0.264 + ], + "angle": 0, + "content": "(a) Main beam direction." + }, + { + "type": "image", + "bbox": [ + 0.732, + 0.109, + 0.915, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.765, + 0.253, + 0.866, + 0.265 + ], + "angle": 0, + "content": "(b) Realized gain." + }, + { + "type": "image_caption", + "bbox": [ + 0.607, + 0.274, + 0.825, + 0.287 + ], + "angle": 0, + "content": "Figure 8: Key results of the CPLWA." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.31, + 0.912, + 0.377 + ], + "angle": 0, + "content": "surface at a specific frequency.
Therefore, the location \( L_{t} \) (\( x_{t}, y_{t} \)) of the target device is the intersection point of the two conical surfaces and the horizontal plane of its height. By combining these conditions, \( L_{t} \) can be estimated by solving the following equation set:" + }, + { + "type": "equation", + "bbox": [ + 0.634, + 0.378, + 0.909, + 0.413 + ], + "angle": 0, + "content": "\\[\nL_{t} = \\left(x_{t}, y_{t}\\right) = \\left\\{ \\begin{array}{l} F \\left(L_{r}, f_{r}\\right), \\\\ F \\left(L_{l}, f_{l}\\right), \\\\ z = z_{t}, \\end{array} \\right. \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.426, + 0.915, + 0.509 + ], + "angle": 0, + "content": "where \\( z_{t} \\) is the target's height; functions \\( F(L_{r}, f_{r}) \\) and \\( F(L_{l}, f_{l}) \\) are mathematical equations of conical surfaces with the location of LWAs as the vertex. These two equations indicate the propagation directions of RHCP and LHCP signals at frequencies \\( f_{r} \\) and \\( f_{l} \\), respectively. Taking the RHCP signal as an example, \\( F = F(L_{r}, f_{r}) \\) can be formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.594, + 0.514, + 0.918, + 0.543 + ], + "angle": 0, + "content": "\\[\nF = (x - x_{r})^{2} - \\frac{(y - y_{r})^{2}}{a^{2}} - \\frac{(z - z_{r})^{2}}{a^{2}}, \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.545, + 0.651, + 0.559 + ], + "angle": 0, + "content": "where \\(a = \\cot [\\theta (f_r)]\\)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.56, + 0.914, + 0.684 + ], + "angle": 0, + "content": "However, there are two other types of signals impacting the localization accuracy when Bifrost functions: 1) LP WiFi signal that is emitted by the WiFi AP, and then received by the target. This signal establishes data communication between the target and the AP and propagates in both the LoS path and multipath. It is also the input signal of LWAs, which will be transformed into FSDM signals by the LWAs. 2) CP multipath signal that propagates from LWAs to the target after reflection, resulting in undesired noisy signals at the target." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.685, + 0.916, + 0.741 + ], + "angle": 0, + "content": "Thus, we should first identify the frequency of the FSDM signal from the LP WiFi signal (discussed in §3.3) and then filter out the CP multipath signal as much as possible (discussed in §3.4 and §3.5), to accurately estimate frequencies, \\( f_{l} \\) and \\( f_{r} \\), and the target's location." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.751, + 0.862, + 0.768 + ], + "angle": 0, + "content": "3.3 Identifying Frequencies of CP Signals" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.77, + 0.915, + 0.895 + ], + "angle": 0, + "content": "When Bifrost functions, LWAs need the LP WiFi signal as input, and the target device may also need it for data communication with the WiFi AP. Nevertheless, the LP signal may interfere with the reception of the CP signal, because CP antennas at the target device can receive the LP signal (as already explained in §2). To cancel this interference, we control LWAs to be periodically turned on and off, working in a duty-cycled manner. This design allows the target to identify frequencies that correspond to the CP signal by analyzing the variation in its received spectrum, and at the same" + }, + { + "type": "page_footnote", + "bbox": [ + 0.084, + 0.882, + 0.389, + 0.895 + ], + "angle": 0, + "content": "5 Unless stated otherwise, CP signals have the property of FSDM."
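A sketch of solving the Eq. (3)/(4) system numerically for \(L_t\) at a known height; the LWA coordinates, the `theta` dispersion mapping, and the initial guess below are all made up for illustration, not taken from the paper:

```python
import numpy as np
from scipy.optimize import fsolve

def cone(p, L, a, z_t):
    # Eq. (4) evaluated at the target height z_t: zero when the point
    # (x, y, z_t) lies on the conical surface with vertex L = (xr, yr, zr).
    x, y = p
    xr, yr, zr = L
    return (x - xr) ** 2 - ((y - yr) ** 2 + (z_t - zr) ** 2) / a ** 2

def localize(L_r, f_r, L_l, f_l, z_t, theta_of_f, guess=(2.0, 2.0)):
    # Solve the Eq. (3) system for L_t = (x_t, y_t) at the known height z_t.
    a_r = 1.0 / np.tan(theta_of_f(f_r))    # a = cot(theta(f))
    a_l = 1.0 / np.tan(theta_of_f(f_l))
    eqs = lambda p: [cone(p, L_r, a_r, z_t), cone(p, L_l, a_l, z_t)]
    return fsolve(eqs, guess)

# Toy usage with made-up LWA positions and an assumed linear dispersion map.
theta = lambda f: np.radians(22.0 + 22.0 * (f - 5.17e9) / 0.16e9)
print(localize((0.0, 0.0, 1.0), 5.25e9, (5.0, 0.0, 1.0), 5.30e9, 1.2, theta))
```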
+ }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.923, + 0.515, + 0.935 + ], + "angle": 0, + "content": "380" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.072, + 0.402, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "header", + "bbox": [ + 0.805, + 0.073, + 0.914, + 0.086 + ], + "angle": 0, + "content": "Yimiao Sun, et al." + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.099, + 0.482, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.175, + 0.193, + 0.388, + 0.206 + ], + "angle": 0, + "content": "(a) Normalized amplitude variation." + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.222, + 0.482, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.187, + 0.302, + 0.378, + 0.315 + ], + "angle": 0, + "content": "(b) Normalized phase variation." + }, + { + "type": "image_caption", + "bbox": [ + 0.173, + 0.321, + 0.39, + 0.334 + ], + "angle": 0, + "content": "Figure 9: Standardized CSI variation." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.353, + 0.485, + 0.49 + ], + "angle": 0, + "content": "time, saves energy of LWAs. Specifically, we exploit WiFi CSI [27, 28, 75, 78] to explore fine-grained information on the amplitude and phase of the subcarriers. Fig. 9 illustrates the result of a proof-of-concept experiment, where subcarriers corresponding to LoS and multipath signals are distinguishable in the normalized amplitude of CSI. However, the variation in phase is not obvious, making it challenging to discern useful subcarriers because they are often obscured by random errors and noise. According to this result, we can only extract frequencies of the CP signal based on the amplitude variation in CSI." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.491, + 0.484, + 0.561 + ], + "angle": 0, + "content": "As a LWA turns on or off, we denote the corresponding CSI as \\( H_{on}(f_k) \\) and \\( H_{off}(f_k) \\) for the \\( k \\)-th subcarrier with center frequency \\( f_k \\), respectively. The former is jointly influenced by CP and LP signals, while the latter is determined by the LP signal only, leading to the following relationship:" + }, + { + "type": "equation", + "bbox": [ + 0.171, + 0.568, + 0.481, + 0.609 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| H_{on} \\left(f_{k}\\right) \\right\\| = \\left\\| H^{CP} \\left(f_{k}\\right) + H^{LP} \\left(f_{k}\\right) \\right\\|, \\\\ \\left\\| H_{off} \\left(f_{k}\\right) \\right\\| = \\left\\| H^{LP} \\left(f_{k}\\right) \\right\\|, \\tag{5} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.615, + 0.483, + 0.671 + ], + "angle": 0, + "content": "where \\( \\| H^{CP}(f_k)\\| \\) is the amplitude of subcarriers corresponding to the CP signal, and \\( \\| H^{LP}(f_k)\\| \\) is that of the LP signal.
Based on these two values, we can quantify the variation of CSI caused by the CP signal:" + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.671, + 0.483, + 0.736 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\Delta H \\left(f_{k}\\right) \\right\\| = \\left\\| H^{CP} \\left(f_{k}\\right) \\right\\| \\\\ = \\left\\| H^{CP} \\left(f_{k}\\right) + H^{LP} \\left(f_{k}\\right) \\right\\| - \\left\\| H^{LP} \\left(f_{k}\\right) \\right\\| \\tag{6} \\\\ = \\left\\| H_{on} \\left(f_{k}\\right) \\right\\| - \\left\\| H_{off} \\left(f_{k}\\right) \\right\\| \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.742, + 0.485, + 0.894 + ], + "angle": 0, + "content": "In order to accurately analyze this variation and mitigate the effect of occasional outliers and noise, a Z-Score normalization procedure is performed on \\(\\Delta H(f_{k})\\). We execute a preliminary screening to quickly filter out the subcarriers that are less likely corresponding to the frequencies of the CP signals. A percentage threshold \\(\\varepsilon \\in (0, 1)\\) is set to select subcarriers with a larger value of \\(\\Delta H(f_{k})\\), indicating that these subcarriers undergo significant changes and are more likely to be affected by the CP signal. The value of \\(\\varepsilon\\) is chosen empirically based on the degree of multipath. Fig. 10(a) shows a high-level overview of the selected subcarriers, where LHCP and RHCP signals are highlighted in red and blue," + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.106, + 0.918, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.601, + 0.263, + 0.829, + 0.277 + ], + "angle": 0, + "content": "(a) Selecting frequencies of CP signals." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.294, + 0.918, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.612, + 0.399, + 0.817, + 0.413 + ], + "angle": 0, + "content": "(b) Filtering out multipath signals." + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.436, + 0.671, + 0.529 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.552, + 0.537, + 0.678, + 0.551 + ], + "angle": 0, + "content": "(c) Align subcarriers." + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.435, + 0.868, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.717, + 0.537, + 0.865, + 0.551 + ], + "angle": 0, + "content": "(d) Estimate frequencies." + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.563, + 0.914, + 0.59 + ], + "angle": 0, + "content": "Figure 10: Workflow of selecting correct frequencies (LHCP and RHCP are distinguished by red and blue colors)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.613, + 0.913, + 0.641 + ], + "angle": 0, + "content": "respectively. In subsequent stages, we exclusively focus on these selected subcarriers." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.653, + 0.831, + 0.67 + ], + "angle": 0, + "content": "3.4 Filtering out the Multipath Signal" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.671, + 0.917, + 0.852 + ], + "angle": 0, + "content": "As shown in Fig. 9(a), even though we have identified the frequencies of the CP signal from the WiFi signal, there still exists the multipath signal, resulting in undesired variation in \\(\\Delta H\\). Note that the multipath signal is mainly introduced by reflection of the CP FSDM signal.
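The on/off differencing of Eqs. (5)-(6) together with the Z-Score screening of §3.3 can be sketched as below; the `eps` value and the toy CSI vectors are assumptions for illustration, not values from the paper:

```python
import numpy as np

def select_cp_subcarriers(H_on, H_off, eps=0.01):
    # Eq. (6): the CSI amplitude variation caused by the CP signal.
    delta = np.abs(H_on) - np.abs(H_off)
    # Z-Score normalization to damp outliers and noise.
    z = (delta - delta.mean()) / delta.std()
    # Keep the top eps fraction of subcarriers (the percentage threshold).
    k = max(1, int(eps * len(z)))
    return np.sort(np.argsort(z)[-k:])

# Toy usage: 2025 subcarriers; the LWA being "on" adds energy near
# subcarrier 700, mimicking the duty-cycled FSDM signal.
rng = np.random.default_rng(0)
H_off = rng.normal(1.0, 0.05, 2025) + 1j * rng.normal(0.0, 0.05, 2025)
H_on = H_off.copy()
H_on[690:710] += 0.8
print(select_cp_subcarriers(H_on, H_off, eps=0.01))   # ~ indices 690..709
```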
We find that subcarriers corresponding to the multipath signal can be divided into two categories: 1) Sparsely clustered subcarriers \\(C_s\\): FSDM signals with different frequencies and propagation directions may go through reflection at many places, but only a few of those signals reach the target with inconsecutive frequencies, resulting in many sparse clusters of subcarriers6. 2) Compactly clustered subcarriers \\(C_c\\): There are some FSDM signals with frequencies close to that of the LoS signal. Those FSDM signals reflect right near the target device, which will result in a" + }, + { + "type": "page_footnote", + "bbox": [ + 0.514, + 0.861, + 0.916, + 0.894 + ], + "angle": 0, + "content": "6 The polarization of the signals may flip after reflection, and we deal with it as the multipath signal in the frequency domain. Thus, this flip doesn't affect the function of our algorithm." + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.923, + 0.513, + 0.935 + ], + "angle": 0, + "content": "381" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.138, + 0.086 + ], + "angle": 0, + "content": "Bifrost" + }, + { + "type": "header", + "bbox": [ + 0.595, + 0.073, + 0.913, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.481, + 0.135 + ], + "angle": 0, + "content": "compact and wide cluster of subcarriers influenced by multipath and LoS signals." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.135, + 0.482, + 0.191 + ], + "angle": 0, + "content": "Here we first try to filter out \\( C_s \\). To do so, all the varied subcarriers are clustered, as Fig. 10(b) illustrates. Then, the following integral function will be calculated for every cluster to find the one most likely to correspond to the LoS signal," + }, + { + "type": "equation", + "bbox": [ + 0.2, + 0.193, + 0.48, + 0.234 + ], + "angle": 0, + "content": "\\[\nC^{i} = \\int_{f_{k_{\\min}}^{i}}^{f_{k_{\\max}}^{i}} \\| \\Delta H (f_{k}^{i}) \\| \\, d f_{k} \\tag{7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.234, + 0.482, + 0.314 + ], + "angle": 0, + "content": "where \\( f_{k_{\\min}}^{i} \\) and \\( f_{k_{\\max}}^{i} \\) are the minimum and maximum frequencies of the \\( i \\)-th cluster, respectively. The value of \\( C^i \\) can be regarded as the area formed by the curve of \\( \\| \\Delta H(f_k^i)\\| \\) and the two frequencies \\( f_{k_{\\min}}^{i},f_{k_{\\max}}^{i} \\). The wider the bandwidth and higher the amplitude of a cluster are, the greater the value of its \\( C^i \\) is." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.315, + 0.484, + 0.399 + ], + "angle": 0, + "content": "After that, we only retain the cluster that bears the highest \\( C^i \\), which is most likely to be \\( C_c \\) and contains subcarriers corresponding to the LoS signal. However, as we mentioned before, some subcarriers in \\( C_c \\) are also corresponding to the undesired multipath signal. Next, we are going to purify \\( C_c \\) by narrowing down its frequency range as much as possible."
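A minimal sketch of the Eq. (7) cluster scoring, assuming the selected subcarrier indices from the previous step: adjacent indices are grouped into runs, each run is scored by trapezoidal integration of \(\|\Delta H\|\), and the highest-scoring run is kept as \(C_c\). The 78.125 kHz spacing matches 802.11ax subcarriers; the toy indices and amplitudes are made up.

```python
import numpy as np

def best_cluster(idx, delta_h, df):
    # Group the selected subcarrier indices into runs of adjacent
    # subcarriers, splitting wherever consecutive indices are not adjacent.
    clusters = np.split(idx, np.where(np.diff(idx) > 1)[0] + 1)

    def score(c):
        # Eq. (7): C^i = integral of ||Delta H|| over the cluster's band,
        # approximated by trapezoidal integration with subcarrier spacing df.
        return np.trapz(delta_h[c], dx=df) if len(c) > 1 else 0.0

    return max(clusters, key=score)

# Toy usage: one wide, strong run (the LoS cluster C_c) against sparse
# single-subcarrier stragglers (C_s).
idx = np.array([100, 101, 102, 103, 104, 400, 650, 651])
delta_h = np.zeros(1000)
delta_h[idx] = [3.0, 4.0, 5.0, 4.0, 3.0, 6.0, 2.0, 2.0]
print(best_cluster(idx, delta_h, df=78.125e3))   # -> [100 101 102 103 104]
```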
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.409, + 0.48, + 0.434 + ], + "angle": 0, + "content": "3.5 Purifying the LoS Signal for Localization" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.446, + 0.483, + 0.564 + ], + "angle": 0, + "content": "Denote the frequency range of \\(C_c\\) as \\([k_{\\min}^{r}, k_{\\max}^{r}]\\) for RHCP signals and \\([k_{\\min}^{l}, k_{\\max}^{l}]\\) for LHCP signals. In both of the two ranges, we are going to find the subcarrier with the largest \\( \\|\\Delta H(f_k)\\| \\), as Fig. 10(c) illustrates. After obtaining them, we denote the index of selected subcarriers as \\( K^r \\) and \\( K^l \\). Next, as Fig. 10(c) depicts, we align \\( K^r \\) and \\( K^l \\), then trim the head and tail to retain the intersection of two clusters, \\( \\| \\Delta H^r (f_k) \\| \\) and \\( \\| \\Delta H^l (f_k) \\| \\). Finally, we multiply \\( \\| \\Delta H^r (f_k) \\| \\) and \\( \\| \\Delta H^l (f_k) \\| \\) to form a weight matrix \\( G \\), which is illustrated in Fig. 10(d)." + }, + { + "type": "equation", + "bbox": [ + 0.095, + 0.568, + 0.482, + 0.639 + ], + "angle": 0, + "content": "\\[\nG = \\begin{bmatrix} \\left\\| \\Delta H^{r} (f_{K^{r}-\\delta}) \\right\\| \\\\ \\vdots \\\\ \\left\\| \\Delta H^{r} (f_{K^{r}+\\delta}) \\right\\| \\end{bmatrix} \\times \\begin{bmatrix} \\left\\| \\Delta H^{l} (f_{K^{l}-\\delta}) \\right\\| & \\dots & \\left\\| \\Delta H^{l} (f_{K^{l}+\\delta}) \\right\\| \\end{bmatrix}, \\tag{8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.647, + 0.483, + 0.665 + ], + "angle": 0, + "content": "where \\(\\delta\\) is the half width of the retained intersection. Then, \\(f_r\\) and \\(f_l\\) are estimated by taking the weighted average" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.668, + 0.482, + 0.785 + ], + "angle": 0, + "content": "of values in \\([f_{K^{r}-\\delta}, f_{K^{r}+\\delta}]\\) and \\([f_{K^{l}-\\delta}, f_{K^{l}+\\delta}]\\), which are weighted by the corresponding values in the matrix \\(G\\). The purpose of this step is still to mitigate the interference of the multipath signal. After that, the estimated values of the two frequencies will be fed into Eq. (4) to output an estimation of the target's location. Note that if there are multiple WiFi links for selection, one can choose the link that results in the smallest size of \\(\\| \\Delta H(f_k)\\|\\), meaning that the range of LoS signals' frequency is reduced to the minimum." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.785, + 0.483, + 0.895 + ], + "angle": 0, + "content": "Note that the basis of our localization algorithm is using the different CP signals to distinguish different LWAs, and the CP signals can't be replaced by the LP signals. The reason is that the LP signals may lead to high localization errors or even the breakdown of the localization system. Specifically, once the orientation of LP devices changes, polarization directions of these devices change accordingly. As such, each receiving antenna is very likely to receive FSDM signals from both LWAs and can't distinguish them." + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.102, + 0.912, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.625, + 0.236, + 0.804, + 0.249 + ], + "angle": 0, + "content": "Figure 11: Hardware Settings."
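The Eq. (8) weighting can be sketched as an outer product over the two aligned windows, followed by weighted averages for \(f_r\) and \(f_l\); the Gaussian toy spectra below are assumptions standing in for the purified LoS clusters:

```python
import numpy as np

def estimate_frequencies(freqs, dh_r, dh_l, K_r, K_l, delta):
    # Build the weight matrix G of Eq. (8): outer product of the RHCP
    # window (column vector) and the LHCP window (row vector) around the
    # peak subcarrier indices K_r and K_l.
    w_r = dh_r[K_r - delta : K_r + delta + 1]
    w_l = dh_l[K_l - delta : K_l + delta + 1]
    G = np.outer(w_r, w_l)                      # (2*delta+1) x (2*delta+1)

    # Weighted averages over the two windows: row sums of G weight the
    # RHCP frequencies, column sums weight the LHCP frequencies.
    f_r = np.average(freqs[K_r - delta : K_r + delta + 1], weights=G.sum(axis=1))
    f_l = np.average(freqs[K_l - delta : K_l + delta + 1], weights=G.sum(axis=0))
    return f_r, f_l

# Toy usage: 2025 subcarriers with Gaussian bumps as the two LoS clusters.
k = np.arange(2025)
freqs = 5.17e9 + 78.125e3 * k
dh_r = np.exp(-0.5 * ((k - 700) / 3.0) ** 2)
dh_l = np.exp(-0.5 * ((k - 1200) / 3.0) ** 2)
print(estimate_frequencies(freqs, dh_r, dh_l, 700, 1200, delta=5))
```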
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.254, + 0.915, + 0.371 + ], + "angle": 0, + "content": "For example, a receiving antenna with \\(0^{\\circ}\\) polarization can receive both \\(0^{\\circ}\\) and \\(90^{\\circ}\\) polarized FSDM signals after rotating \\(45^{\\circ}\\). In this case, the target can't distinguish FSDM signals from the two LWAs, and then the localization system can't work. Note that this problem can't be avoided since the target antenna's orientation isn't known in advance. In contrast, CP signals are free from this problem. The RHCP signal can't be received by LHCP antennas no matter which orientation the target antenna has." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.371, + 0.916, + 0.413 + ], + "angle": 0, + "content": "Next, we will proceed with describing the prototype implementation to gain insights on the performance of Bifrost in varied settings." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.415, + 0.673, + 0.43 + ], + "angle": 0, + "content": "4 EVALUATION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.445, + 0.916, + 0.466 + ], + "angle": 0, + "content": "We evaluate the performance of Bifrost using two low-cost PCB-based LWAs working at 5.17GHz-5.33GHz and a WiFi sensing plat" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.471, + 0.915, + 0.693 + ], + "angle": 0, + "content": "form called PicoScenes [38] to extract CSI. When Bifrost functions, the WiFi transceiver communicates at the same band based on 802.11ax standard [1]. We first describe our implementation and evaluation settings in §4.1. Then, investigation on Bifrost's performance is four-pronged: §4.2 compares Bifrost with SpotFi [43], the state-of-the-art indoor WiFi localization technique, in a real-world indoor setting and NLoS scenarios, and then shows how the localization accuracy can be improved when Bifrost aids SpotFi to function in AP-constrained scenarios; Subsequently, in §4.3, we conduct an ablation study to evaluate the contribution of each sub-module of localization algorithm; Then, in §4.4, we dissect the impacting factors on localization accuracy, including multipath, transmission power, as well as the distance between LWAs and the AP; Also, we evaluate the influence of deploying Bifrost on data communication of WiFi transceivers in §4.5; Finally we summarize the evaluation in §4.6." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.704, + 0.842, + 0.738 + ], + "angle": 0, + "content": "4.1 Implementation and Experimental Methodology" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.743, + 0.916, + 0.895 + ], + "angle": 0, + "content": "Hardware and Software. Our proposed LWA is shown in Fig. 11(b). The main body of our LWA is \\(24.2\\mathrm{cm} \\times 5.2\\mathrm{cm}\\), containing 11 single units designed to ensure most input signals' energy can be leaked out. One of the LWA's feed ports is connected to a LP antenna for receiving the WiFi signal while the other port is connected to a \\(50\\Omega\\) matched load to absorb the remaining energy of the signal that goes through the entire LWA structure. By switching the feed port, the polarization of the FSDM signal can be altered between LHCP and RHCP. Besides, a low-noise amplifier powered by a small rechargeable battery is utilized to boost the input signal with 0.43W power consumption. 
A NE555 timer IC with a load switch circuit" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.923, + 0.515, + 0.935 + ], + "angle": 0, + "content": "382" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.073, + 0.404, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "header", + "bbox": [ + 0.805, + 0.073, + 0.914, + 0.086 + ], + "angle": 0, + "content": "Yimiao Sun, et al." + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.104, + 0.462, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.27, + 0.233, + 0.288, + 0.244 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.253, + 0.462, + 0.383 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.384, + 0.289, + 0.395 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.405, + 0.28, + 0.578 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.583, + 0.214, + 0.594 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.405, + 0.465, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.383, + 0.582, + 0.403, + 0.594 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.61, + 0.483, + 0.661 + ], + "angle": 0, + "content": "Figure 12: Experimental scenarios and deployment: (a) The hall scenario; (b) The classroom scenario; (c) The APs' deployment in the corridor and the classroom; (d) The APs' deployment in the hall and the meeting room." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.686, + 0.483, + 0.796 + ], + "angle": 0, + "content": "is employed to control the on-off state of the amplifier and further LWAs, resulting in a \\(20\\%\\) duty-cycled manner for energy saving. The cost of each proposed LWA is 7.41 USD, where 4.36 USD is for the material cost and 3.05 USD is for the control circuit. To receive the CP FSDM signal, we equip the target with two \\(3.87\\mathrm{cm}\\times 3.87\\mathrm{cm}\\) patch antennas, as Fig. 11(a) depicts. One antenna is LHCP, while the other is RHCP, and both are fixed on the antenna mount connected to COMFAST AX210 WiFi card [17] on the host computer." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.797, + 0.483, + 0.853 + ], + "angle": 0, + "content": "We use PicoScenes, a WiFi sensing platform, to send WiFi packets at AP with 20dBm, and extract CSI at the target. In the working band of Bifrost, PicoScenes can procure CSI data of 2025 subcarriers with indexes \\([-1012, 1012]\\). We run PicoScenes on Ubuntu 20.04," + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.107, + 0.914, + 0.133 + ], + "angle": 0, + "content": "then analyze CSI data and execute the localization algorithm on MATLAB 2022b." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.138, + 0.916, + 0.249 + ], + "angle": 0, + "content": "Baseline. We compare Bifrost with SpotFi, the state-of-the-art indoor WiFi localization technique, under various settings. To ensure the validity of our results, we make our best effort to re-implement SpotFi and ensure fairness through comparison. We evaluate the performance of SpotFi by deploying multiple WiFi APs strictly based on the real-world settings of WiFi APs, as Fig. 12 shows. 
Before each set of experiments, we use a laser rangefinder to obtain the ground-truth, including coordinates of the target device and LWAs." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.252, + 0.917, + 0.378 + ], + "angle": 0, + "content": "Scenarios and Deployment. We select four typical indoor scenarios for evaluation, across different sizes and different levels of multipath effect: 1) A small-size hall (6.2m×4.5m) with little multipath; 2) A long and narrow corridor (7.5m×2.1m) with little multipath; 3) A small-size meeting room (5.7m×4.9m) with rich multipath; 4) A large-size classroom (10.6m×7.1m) with rich multipath. In each scenario, two LWAs are attached to two orthogonal walls. The target device is mounted onto tripods, keeping the height constant across all experiments." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.388, + 0.731, + 0.403 + ], + "angle": 0, + "content": "4.2 Overall Performance" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.407, + 0.915, + 0.492 + ], + "angle": 0, + "content": "In this section, we first evaluate the localization accuracy of Bifrost and SpotFi in real-world settings, where WiFi APs in experiments are deployed at the same positions as those in practice. Then we deploy Bifrost in the meeting room and classroom, where SpotFi doesn't work well, to enhance the performance of SpotFi, so as to see the accuracy improvement brought by Bifrost." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.494, + 0.915, + 0.646 + ], + "angle": 0, + "content": "Performance Comparison in Realistic Settings. In reality, most indoor WiFi APs are dispersively deployed at different locations and very likely separated from each other by walls so that LoS paths are usually obstructed. Thus, it is hard for the target device to establish more than one LoS connection with APs, according to our real-world investigation (i.e., Fig. 2). We evaluate the performance of SpotFi in these practical indoor settings, and also the localization error of Bifrost when deployed in the above-mentioned four scenarios. In each scenario, 50 locations are chosen for location estimation. The evaluation results are reported in Fig. 13 (the solid blue line stands for Bifrost and the dashed red line stands for SpotFi)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.647, + 0.916, + 0.825 + ], + "angle": 0, + "content": "In the hall, both Bifrost and SpotFi are supposed to exhibit the best performance due to the low-level multipath effect, but the median error of SpotFi is \(1.23\mathrm{m}\), which is more than \(2\times\) Bifrost's \(0.61\mathrm{m}\). This is because only one decent LoS signal can be obtained at most locations due to the blockage of walls even though three APs are deployed around. As the pie chart illustrates, SpotFi outperforms Bifrost at only 9 locations. When it comes to the corridor scenario, the median error of SpotFi increases to \(1.77\mathrm{m}\) because two of the three APs are situated inside rooms so that AoAs obtained by the target are heavily distorted. We note that the median error of Bifrost also increases to \(0.76\mathrm{m}\). This slight performance degradation is mainly due to the extension of the localization range, which is further investigated in §4.4." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.826, + 0.916, + 0.895 + ], + "angle": 0, + "content": "Next, we switch to the meeting room where more pronounced multipaths exist.
What's worse, there is no AP in the meeting room, making it more challenging for both approaches to function. The accuracy of the two approaches is unsurprisingly degraded, where the median error is \(1.95\mathrm{m}\) for SpotFi and \(0.91\mathrm{m}\) for Bifrost. Similarly, the" + }, + { + "type": "page_footnote", + "bbox": [ + 0.083, + 0.871, + 0.483, + 0.893 + ], + "angle": 0, + "content": "PicoScenes automatically interpolates the 0-th and other 32 pilot subcarriers besides 1992 tone RUs in this band." + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.923, + 0.515, + 0.935 + ], + "angle": 0, + "content": "383" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.138, + 0.086 + ], + "angle": 0, + "content": "Bifrost" + }, + { + "type": "header", + "bbox": [ + 0.595, + 0.073, + 0.913, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.104, + 0.287, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.257, + 0.207, + 0.267 + ], + "angle": 0, + "content": "(a) Hall" + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.104, + 0.495, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.362, + 0.257, + 0.428, + 0.268 + ], + "angle": 0, + "content": "(b) Corridor" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.104, + 0.703, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.559, + 0.257, + 0.649, + 0.269 + ], + "angle": 0, + "content": "(c) Meeting room" + }, + { + "type": "image", + "bbox": [ + 0.713, + 0.104, + 0.911, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.776, + 0.257, + 0.849, + 0.268 + ], + "angle": 0, + "content": "(d) Classroom" + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.277, + 0.911, + 0.303 + ], + "angle": 0, + "content": "Figure 13: Overall performance of Bifrost and SpotFi across different scenarios (the pie charts represent the number of locations at which each method shows a lower error)." + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.311, + 0.291, + 0.465 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.135, + 0.473, + 0.292, + 0.485 + ], + "angle": 0, + "content": "(a) The NLoS AP outdoors." + }, + { + "type": "image", + "bbox": [ + 0.304, + 0.31, + 0.45, + 0.465 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.315, + 0.473, + 0.465, + 0.485 + ], + "angle": 0, + "content": "(b) The NLoS AP indoors." + }, + { + "type": "image_caption", + "bbox": [ + 0.151, + 0.495, + 0.419, + 0.508 + ], + "angle": 0, + "content": "Figure 14: Deployment of the NLoS settings." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.532, + 0.483, + 0.643 + ], + "angle": 0, + "content": "performance of SpotFi is restrained due to the lack of the LoS signal. Bifrost exhibits acceptable performance in this tough environment and avoids escalation of errors. This can be attributed to two aspects. On one hand, Bifrost can function once the input signal has enough energy, without the need for a LoS AP. On the other hand, Bifrost exploits a delicate algorithm to tame the multipath effect. We will further discuss issues of multipath and NLoS in §4.4. In this scenario, SpotFi doesn't outperform Bifrost at any location."
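+ }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "As a quick arithmetic check of the medians above: \(1.23\,\mathrm{m}/0.61\,\mathrm{m} \approx 2.0\) in the hall and \(1.95\,\mathrm{m}/0.91\,\mathrm{m} \approx 2.1\) in the meeting room, i.e., SpotFi's median error is roughly twice Bifrost's in both cases."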
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.644, + 0.483, + 0.725 + ], + "angle": 0, + "content": "Finally, we set SpotFi and Bifrost in the large-size classroom with rich multipath. With a LoS AP, the median error of SpotFi is reduced to \(1.87\mathrm{m}\), which is better than that in the meeting room with no LoS AP. By contrast, the median error of Bifrost increases to \(1.20\mathrm{m}\), mainly due to a longer distance between LWAs and WiFi APs and more multipath." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.726, + 0.483, + 0.809 + ], + "angle": 0, + "content": "Through all experiments in four scenarios, the median error of Bifrost is \(0.81\mathrm{m}\), which is \(52.35\%\) less than that of SpotFi (i.e., \(1.70\mathrm{m}\)). Bifrost outperforms SpotFi at most locations, except where the target can obtain 3 LoS signals from 3 APs. However, as shown in Fig. 13, the chance for SpotFi to achieve better performance is less than \(7\%\)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.812, + 0.483, + 0.867 + ], + "angle": 0, + "content": "Performance Comparison in NLoS Scenarios. Then we conduct two groups of experiments to demonstrate Bifrost's ability to localize in NLoS scenarios and compare its performance with that of SpotFi." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.867, + 0.483, + 0.895 + ], + "angle": 0, + "content": "In the first group of experiments, we deploy the localized target and the LWAs in a hall. As Bifrost only uses one AP to function," + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.318, + 0.916, + 0.497 + ], + "angle": 0, + "content": "we evaluate the performance of Bifrost when this AP is inside and outside the hall (i.e., LoS and NLoS scenarios). The results in Fig. 15 show that the median errors of Bifrost are \(0.61\mathrm{m}\) in LoS and \(0.73\mathrm{m}\) in NLoS, respectively. Meanwhile, in the same hall, we also evaluate the performance of SpotFi in LoS and NLoS scenarios, respectively. In the LoS scenario, 3 APs are deployed in the hall and can establish LoS connections with the target. In the NLoS scenario, as Fig. 14(a) shows, one of the APs (i.e., AP1) is outside the room, while the other 2 APs (i.e., AP2 and AP3) can connect with the target along the LoS paths. We find that the median error of SpotFi increases from \(0.45\mathrm{m}\) in LoS to \(1.15\mathrm{m}\) in NLoS. The error may further go beyond \(1.6\mathrm{m}\) if only one AP is left in LoS, as reported in [43]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.498, + 0.916, + 0.65 + ], + "angle": 0, + "content": "In the second group of experiments, we compare the performance of Bifrost and SpotFi using a different NLoS setting. As Fig. 14(b) shows, we deploy the localized target, LWAs and three APs in the same hall. One of the three WiFi APs (i.e., AP1) is deliberately deployed around the corner and surrounded by multiple chairs, so it can't establish LoS connections with the target or the LWAs, while the other 2 APs (i.e., AP2 and AP3) can connect with the target along the LoS paths. SpotFi uses all 3 APs to localize the target, and its median error is \(1.21\mathrm{m}\). Bifrost only uses the AP in NLoS (i.e., AP1) to function, and its median error is \(0.69\mathrm{m}\), which is \(42.98\%\) less than that of SpotFi."
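+ }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "The percentage improvements quoted in this section all follow the usual relative error reduction; as a sketch, with \(e\) denoting a median error:" + }, + { + "type": "equation", + "bbox": null, + "angle": 0, + "content": "\[ \mathrm{reduction} = \frac{e_{\mathrm{SpotFi}} - e_{\mathrm{Bifrost}}}{e_{\mathrm{SpotFi}}}, \qquad \text{e.g., } \frac{1.21 - 0.69}{1.21} \approx 42.98\%, \quad \frac{1.70 - 0.81}{1.70} \approx 52.35\%. \]"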
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.65, + 0.915, + 0.706 + ], + "angle": 0, + "content": "These two groups of experiments demonstrate that Bifrost provides relatively stable performance whether the WiFi AP is in a LoS or NLoS scenario. In NLoS scenarios, Bifrost achieves much more accurate localization than SpotFi." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.709, + 0.916, + 0.832 + ], + "angle": 0, + "content": "Performance Enhancement when Bifrost Aids SpotFi. Next, we deploy Bifrost where SpotFi shows poor accuracy to see if Bifrost can aid SpotFi to improve localization accuracy. In practice, it is impossible to deploy Bifrost everywhere, so we choose the meeting room and the classroom, where localization accuracy is heavily affected by constrained APs and is the worst among our scenarios. Specifically, when the target enters these two scenarios, its location will be reported by Bifrost. Otherwise, the target keeps using SpotFi for indoor localization." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.833, + 0.913, + 0.875 + ], + "angle": 0, + "content": "As shown in Fig. 16, the median localization error is \(1.13\mathrm{m}\) when Bifrost aids SpotFi, achieving \(33.54\%\) error reduction compared with SpotFi operating independently in all scenarios. This indicates" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.923, + 0.515, + 0.935 + ], + "angle": 0, + "content": "384" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.072, + 0.404, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "header", + "bbox": [ + 0.805, + 0.073, + 0.914, + 0.086 + ], + "angle": 0, + "content": "Yimiao Sun, et al." + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.104, + 0.293, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.104, + 0.495, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.103, + 0.691, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.263, + 0.692, + 0.292 + ], + "angle": 0, + "content": "Figure 15: Performance of Bifrost and SpotFi in the NLoS scenario. Figure 16: Performance enhancement brought by Bifrost. Figure 17: Ablation study on the localization algorithm." + }, + { + "type": "image", + "bbox": [ + 0.709, + 0.104, + 0.915, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.706, + 0.263, + 0.914, + 0.29 + ], + "angle": 0, + "content": "Figure 18: Impact of the multipath effect." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.312, + 0.482, + 0.34 + ], + "angle": 0, + "content": "that Bifrost can not only work independently, but also enhance the localization accuracy of existing localization techniques." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.359, + 0.25, + 0.376 + ], + "angle": 0, + "content": "4.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.378, + 0.484, + 0.501 + ], + "angle": 0, + "content": "There are three crucial sub-modules in Bifrost's localization algorithm, that is, identifying the frequencies of CP signals (module 1 presented in §3.3), filtering out the multipath signal (module 2 presented in §3.4) and purifying the LoS signal for localization (module 3 presented in §3.5). We conduct an ablation study to evaluate the contribution of each sub-module to localization accuracy.
The evaluation is conducted under four settings, S1: without any sub-module, S2: only with module 1, S3: with modules 1 and 2, and S4: with all three modules." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.502, + 0.484, + 0.627 + ], + "angle": 0, + "content": "Fig. 17 reports the results of this ablation study. If we do nothing and directly extract frequencies from the raw amplitude data of CSI, the median localization error will surge to \(3.31\mathrm{m}\) (S1). Instead, once the LP WiFi signal is filtered out, the frequencies of CP signals can be highlighted, which results in a median localization error of \(1.51\mathrm{m}\) (S2). Further, the results of S3 and S4 show that the median error will be reduced to around \(0.93\mathrm{m}\) and \(0.81\mathrm{m}\) if we filter out the multipath signal and purify the LoS signal. These results show the necessity and contribution of each module in our design." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.646, + 0.275, + 0.663 + ], + "angle": 0, + "content": "4.4 Impacting Factors" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.665, + 0.484, + 0.719 + ], + "angle": 0, + "content": "Next, we analyze the impact of three different factors on the performance of Bifrost, that is, multipath in the environment, the transmission power, and the distance between LWAs and the WiFi AP." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.725, + 0.484, + 0.891 + ], + "angle": 0, + "content": "Multipath. We examine the AoA estimation accuracy of Bifrost in multipath scenarios. We fix the positions of LWAs and the target, then change the number of indoor objects (i.e., chairs and desks) to create different degrees of multipath. Specifically, two desks are first set in the room to emulate a light multipath environment, and then ten chairs are further added to produce richer signal reflections. The results in Fig. 18 indicate that the AoA estimation accuracy degrades as the multipath is intensified, where the median angle error initially sits around \(3.8^{\circ}\), and then increases to around \(6.7^{\circ}\). The more multipath exists, the more sparsely clustered subcarriers \(C_s\) are formed. Thus, when these clusters are stacked with each other to form a wider cluster, there is a certain chance for our" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.312, + 0.914, + 0.339 + ], + "angle": 0, + "content": "algorithm to pick the wrong signal as the LoS signal, causing greater errors in AoA estimation." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.34, + 0.916, + 0.409 + ], + "angle": 0, + "content": "We also note that Bifrost maintains relatively stable performance across different polarizations. The difference between the median errors of LHCP and RHCP signals is less than \(0.3^{\circ}\), which underscores the robustness of our proposed LWA and localization algorithm." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.414, + 0.916, + 0.621 + ], + "angle": 0, + "content": "Transmission Power. The default transmission power of the AP is 20dBm in our above-mentioned evaluations, and we now vary this value to investigate its impact on localization performance. Moreover, as mentioned before, we can't always guarantee that the WiFi AP establishes a LoS path with LWAs, so we also compare the AP in LoS and NLoS scenarios under each transmission power setting. We place the AP 2m outside the door and the target 2m inside the door, switching between the LoS and NLoS scenarios by opening and closing the door.
Results in Fig. 19 show that decreasing the transmission power leads to an increase in the localization error, regardless of whether the AP is in LoS or NLoS. Besides, the errors in the LoS scenario are always lower than those in NLoS at the same transmission power. These findings indicate the negative impact that NLoS can have on localization performance." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.622, + 0.916, + 0.719 + ], + "angle": 0, + "content": "However, we also observe that as the transmission power increases, the impact of NLoS on the performance of Bifrost decreases, albeit gradually. Notably, when the transmission power is set at \(20\mathrm{dBm}\), the median errors are \(0.61\mathrm{m}\) and \(0.73\mathrm{m}\) in LoS and NLoS scenarios, respectively. In practical scenarios, this performance is sufficient to meet the requirements of most location-based applications." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.724, + 0.916, + 0.891 + ], + "angle": 0, + "content": "Distance between AP and LWAs. The performance of Bifrost may be influenced by the energy of the input WiFi signal fed into LWAs, because it determines the SNR (signal-to-noise ratio) of the FSDM signal. The energy of the input WiFi signal is mainly related to two factors, namely the transmission power and the distance between the AP and LWAs. While the former factor is discussed above, we here probe into the impact of distance. We carry out the experiments along the corridor and remove reflectors as far as possible, while the distance is set to \(2.5\mathrm{m}\), \(5\mathrm{m}\), \(7.5\mathrm{m}\), and \(10\mathrm{m}\). Results in Fig. 20 demonstrate that the localization error increases with distance and may even result in outliers. The median errors are \(0.63\mathrm{m}\), \(0.65\mathrm{m}\), and \(0.93\mathrm{m}\) in the first three groups of experiments," + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.923, + 0.515, + 0.935 + ], + "angle": 0, + "content": "385" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.138, + 0.086 + ], + "angle": 0, + "content": "Bifrost" + }, + { + "type": "header", + "bbox": [ + 0.595, + 0.073, + 0.913, + 0.088 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "image", + "bbox": [ + 0.09, + 0.104, + 0.3, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.305, + 0.104, + 0.497, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.104, + 0.699, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.702, + 0.104, + 0.904, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.086, + 0.263, + 0.91, + 0.292 + ], + "angle": 0, + "content": "Figure 19: Impact of the transmission power. Figure 20: Impact of the distance between AP and LWAs. Figure 21: Impact on the AP and the target of Bifrost. Figure 22: Impact on other WiFi connections." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.313, + 0.484, + 0.368 + ], + "angle": 0, + "content": "all of which are below \(1\mathrm{m}\), yet the error spikes to \(1.49\mathrm{m}\) in the setting of \(10\mathrm{m}\) distance. Despite this, the range of \(7.5\mathrm{m}\) is sufficient to cover most rooms in a typical building, thus ensuring the feasibility of Bifrost's function."
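+ }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "The dependence on transmission power and AP-LWA distance is consistent with a free-space link budget; under the textbook Friis model (a standard approximation, not a model fitted to Bifrost), the power fed into the LWAs scales as" + }, + { + "type": "equation", + "bbox": null, + "angle": 0, + "content": "\[ P_{r} = P_{t} G_{t} G_{r} \left( \frac{\lambda}{4\pi d} \right)^{2}, \]" + }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "so each doubling of \(d\) costs about 6 dB of FSDM-signal SNR, which is consistent with the gradual error growth observed in Fig. 20."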
+ }, + { + "type": "title", + "bbox": [ + 0.082, + 0.38, + 0.348, + 0.396 + ], + "angle": 0, + "content": "4.5 Impact on Communication" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.398, + 0.483, + 0.522 + ], + "angle": 0, + "content": "In this section, we evaluate the impact of deploying Bifrost on the WiFi connections, including the connection between the AP and the target as well as other connections. Firstly, we control the AP to transmit 1000 packets at a \(50~\mathrm{ms}\) interval, and the packet loss rate is recorded in each group of experiments. The results in Fig. 21 show that the median packet loss rates are \(3.92\%\) and \(3.71\%\) when the LWA is on and off, respectively. This difference of about \(0.2\%\) implies that the function of Bifrost has a negligible influence on the AP-target communication." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.523, + 0.484, + 0.703 + ], + "angle": 0, + "content": "Secondly, we place Bifrost's transceiver at an intersection region covered by two commercial APs (AP1 in a classroom and AP2 in a laboratory) with good signal quality. We then use different off-the-shelf smartphones to establish WiFi connections with these APs and record the variation in throughput over 2 hours for each connection (C1: OnePlus 9-AP1, C2: iPhone 13-AP2, C3: OnePlus 9-AP1, and C4: iPhone 13-AP2). The results are shown in Fig. 22. We find that the median throughput degrades by \(2.7\%\) and \(0.4\%\) in C1 and C3, which has nearly no impact on the network quality or user experience. Interestingly, the throughput increases when the LWAs are turned on for C2 and C4. We attribute this increase to statistical variation, mainly caused by changes in network quality and wireless channels." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.714, + 0.322, + 0.73 + ], + "angle": 0, + "content": "4.6 Summary of Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.732, + 0.483, + 0.76 + ], + "angle": 0, + "content": "Based on the above evaluations of Bifrost, the following summary can be drawn:" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.763, + 0.483, + 0.806 + ], + "angle": 0, + "content": "1) The median localization error of Bifrost is \(0.81\mathrm{m}\), which is \(52.35\%\) less than that of SpotFi in arguably realistic indoor settings." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.808, + 0.484, + 0.85 + ], + "angle": 0, + "content": "2) Bifrost can be deployed in scenarios without enough APs to help SpotFi enhance performance, reducing the overall localization error of SpotFi by \(33.54\%\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.853, + 0.484, + 0.896 + ], + "angle": 0, + "content": "3) Distance between LWAs and APs, multipath, and transmission power influence Bifrost's performance differently, yet the absolute accuracy never degrades drastically." + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.763, + 0.484, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.312, + 0.913, + 0.355 + ], + "angle": 0, + "content": "4) The deployment of Bifrost has a negligible impact on the communication quality of either the link between the AP and the target or other WiFi connections."
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.374, + 0.666, + 0.388 + ], + "angle": 0, + "content": "5 DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.393, + 0.916, + 0.421 + ], + "angle": 0, + "content": "In this section, we discuss practical issues concerning the applicability and efficacy of Bifrost." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.424, + 0.916, + 0.507 + ], + "angle": 0, + "content": "Complexity of Deployment. Deploying Bifrost is easy and straightforward, requiring two steps: stick the LWAs to the wall, and measure the LWAs' coordinates. Compared with most existing indoor localization methods, Bifrost works in a plug-and-play manner, requiring neither complex configurations nor additional operations on APs and the target." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.51, + 0.916, + 0.594 + ], + "angle": 0, + "content": "FoV and Coverage of LWAs. Bifrost achieves \(22^{\circ}\) FoV in the current prototype by using 160MHz bandwidth (5.17GHz - 5.33GHz). The FoV and coverage can be expanded by using the entire WiFi band, including frequencies at 2.4GHz, 5.2GHz, and 5.8GHz [47]. This expansion is feasible because most existing WiFi devices already support dual- or tri-band functionality." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.597, + 0.916, + 0.749 + ], + "angle": 0, + "content": "Applicability. Considering that most of the current commercial WiFi devices are equipped with LP antennas, they may not be compatible with Bifrost yet. There are two potential solutions to enhance the applicability of Bifrost. On one hand, some commercial off-the-shelf CP antennas (e.g., CP flat patch antennas [45] of L-com, Inc) are developed to be integrated with existing WiFi APs. Bifrost can be deployed on such devices. On the other hand, in our future work, we will study how to utilize LP rather than CP signals to improve the applicability of Bifrost. To distinguish LWAs using the LP signals, different phase shifts or OOK patterns may be exploited." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.75, + 0.916, + 0.86 + ], + "angle": 0, + "content": "Besides, indoor obstacles may also influence the applicability of Bifrost. The reason is that the localization performance will degrade if the LoS paths between LWAs and the target are blocked by the obstacles. Therefore, one may select proper positions to deploy LWAs to avoid NLoS propagation to the target to be localized. However, the LoS path between LWAs and the WiFi AP isn't a precondition. As long as the LWAs can receive the signal from the WiFi AP, Bifrost can work." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.864, + 0.913, + 0.892 + ], + "angle": 0, + "content": "Lifetime and Maintenance Cost. The rated current of LWAs is \(0.86\mathrm{mA}\). A LWA is powered with a 1600mAh battery and works" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.923, + 0.516, + 0.935 + ], + "angle": 0, + "content": "386" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.073, + 0.402, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "header", + "bbox": [ + 0.805, + 0.073, + 0.914, + 0.086 + ], + "angle": 0, + "content": "Yimiao Sun, et al." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.149 + ], + "angle": 0, + "content": "at \(20\%\) duty cycle. So the estimated lifetime of a LWA is over 9302 hours (\(\approx\)387 days), and the maintenance cost is recharging the battery once every 387 days."
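+ }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "The lifetime figure follows directly from the rated current and the duty cycle:" + }, + { + "type": "equation", + "bbox": null, + "angle": 0, + "content": "\[ \frac{1600\,\mathrm{mAh}}{0.86\,\mathrm{mA} \times 20\%} \approx 9302\,\mathrm{h} \approx 387\ \mathrm{days}. \]"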
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.152, + 0.482, + 0.262 + ], + "angle": 0, + "content": "Potential Interference. One may be concerned that if multiple LWAs are deployed closely, LWAs with the same polarization will interfere with each other. However, each room only has one RHCP LWA and one LHCP LWA in the setting of Bifrost, so LWAs with the same polarization are separated by walls. Interference signals must propagate through the wall, after which they only have low strength. Therefore, different pairs of LWAs hardly interfere with each other." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.278, + 0.258, + 0.293 + ], + "angle": 0, + "content": "6 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.298, + 0.482, + 0.326 + ], + "angle": 0, + "content": "In this section, we briefly summarize existing works in the fields related to our work." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.341, + 0.292, + 0.358 + ], + "angle": 0, + "content": "6.1 Application of LWA" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.36, + 0.485, + 0.456 + ], + "angle": 0, + "content": "The work closest to ours is 123-LOC [42], which presents a THz LWA with two perpendicular slots to radiate horizontal and vertical polarized FSDM signals. Range and angle estimation is then performed by the receiver based on the bandwidth and frequencies of received signals. In comparison, Bifrost reduces the impact of multipath and achieves room-scale localization, which is a challenging task for THz signals." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.457, + 0.485, + 0.651 + ], + "angle": 0, + "content": "LeakyTrack [21] tracks the object between two LWAs based on the observation that nodal and environmental motion changes the received spectral profile of FSDM signals. [76] investigates the security of THz networks with LWAs and shows that FSDM signals of the LWA can hinder eavesdroppers, e.g., by using a wide-band transmission. [20] and [22] study single-shot link discovery with the help of FSDM signals from the LWA. A receiver can discover the direction of the path from the transmitter in one shot. In contrast to those works that require a specific feeding device for THz LWA, Bifrost operates in the WiFi band and works in a plug-and-play manner, providing better applicability and convenience. Additionally, Bifrost addresses relevant challenges, including multipath, noise and ambiguity, by delicately designing the hardware and localization algorithm." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.667, + 0.38, + 0.682 + ], + "angle": 0, + "content": "6.2 WiFi-based Indoor Localization" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.685, + 0.485, + 0.838 + ], + "angle": 0, + "content": "There have been numerous efforts on indoor localization with WiFi [16, 49, 61, 68-70, 84]. Traditional fingerprint-based techniques have been widely used by mapping the RSS readings from multiple APs with locations [46, 66]. Techniques based on AoA and ToF have become more prevalent recently. For example, ArrayTrack [69] proposes an AoA-based WiFi localization system that incorporates multiple APs and the Multiple Signal Classification (MUSIC) algorithm. SpotFi [43] proposes a MUSIC algorithm to obtain AoA and ToF simultaneously. The \\( M^3 \\) system [16] reduces the amount of APs to only one by utilizing multipath signals and frequency hopping among multiple channels." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.839, + 0.485, + 0.893 + ], + "angle": 0, + "content": "Despite such inspiring advances, the existing proposals may chop up the communication link between the target and the AP when the target hops between different APs or channels. In contrast, Bifrost does not interfere with the communication link, which" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.107, + 0.914, + 0.135 + ], + "angle": 0, + "content": "supplements the APs' localization ability, without compromising their communication ability." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.146, + 0.842, + 0.163 + ], + "angle": 0, + "content": "6.3 Polarization of the Wireless Signal" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.165, + 0.916, + 0.207 + ], + "angle": 0, + "content": "LLAMA [15] designs a metasurface to mitigate polarization mismatch by rotating the polarization of wireless signals, which is achieved by applying the bias voltage to the orthogonal compo" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.212, + 0.916, + 0.352 + ], + "angle": 0, + "content": "nents (like \(\overrightarrow{E_x}\) and \(\overrightarrow{E_y}\) shown in Fig. 4) of input signals. RoS [55] and mmTag [51] propose well-designed Van Atta arrays. They all change the polarization of input mmWave signals to the orthogonal one to deal with the self-interference between the incoming signals and the backscattered signals. IntuWition [77] observes that different materials can reflect and scatter the incoming polarized signals in different ways, and exploits this property to classify various materials. SiWa [83] utilizes a similar principle to inspect the wall structure without undermining the structural integrity." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.352, + 0.916, + 0.394 + ], + "angle": 0, + "content": "The above-mentioned works mainly focus on mutable LP signals. Bifrost instead explores the use of orthogonal CP signals, providing more robust performance." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.4, + 0.803, + 0.415 + ], + "angle": 0, + "content": "6.4 Backscatter-aided Localization" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.419, + 0.916, + 0.584 + ], + "angle": 0, + "content": "The backscatter technology [8, 26, 29-31, 37, 53, 55, 58] enables many novel applications, one of which is localization. Both Hawkeye [8] and Millimetro [58] design backscatter tags based on Van Atta arrays to enhance the energy of backscatter signals, so they can localize tags at long range (over \(100\mathrm{m}\)). By assigning unique OOK modulation frequencies to different tags, those two works can also identify and localize tags simultaneously. Moreover, RFID technology [34-36, 39-41] has been widely used in localization tasks. As a typical backscatter technology, RFID can modulate information via the RFID tags. Then, the RFID reader can usually infer the range or orientation of the tags by analyzing the phase variation of the backscatter signals." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.585, + 0.913, + 0.626 + ], + "angle": 0, + "content": "Compared to those works, Bifrost utilizes tags (i.e., LWAs) to create FSDM signals to localize another target, rather than the tag itself."
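+ }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "For context, the phase-based RFID ranging mentioned above typically builds on the round-trip phase of the backscatter signal; in the textbook model (not specific to the systems cited here), a tag at range \(d\) imposes" + }, + { + "type": "equation", + "bbox": null, + "angle": 0, + "content": "\[ \varphi = \left( \frac{4\pi d}{\lambda} \right) \bmod 2\pi, \]" + }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "so the reader can infer range (modulo \(\lambda/2\)) and, with multiple antennas or frequencies, orientation from the phase variation."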
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.637, + 0.677, + 0.652 + ], + "angle": 0, + "content": "7 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.656, + 0.916, + 0.795 + ], + "angle": 0, + "content": "This paper introduces Bifrost, a low-cost and plug-and-play technique to enhance the availability and accuracy of WiFi localization. It can either aid existing techniques to improve their performance, or operate independently to outperform the state of the art in arguably realistic indoor settings, without affecting ongoing data communication of WiFi networks. What sets Bifrost apart from other solutions is its exploration of the polarization of wireless signals and the dispersion property of LWAs, which embodies the concept of RF computing [15, 29, 53, 55]. We plan to explore the research space further in this direction." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.806, + 0.729, + 0.82 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.825, + 0.916, + 0.893 + ], + "angle": 0, + "content": "We thank our anonymous shepherd and reviewers for their insightful comments. This work is partially supported by the National Natural Science Foundation of China under grant No. U21B2007, and the Guoqiang Institute of Tsinghua University under grant No. 2021GQG1002." + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.923, + 0.516, + 0.935 + ], + "angle": 0, + "content": "387" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.138, + 0.086 + ], + "angle": 0, + "content": "Bifrost" + }, + { + "type": "header", + "bbox": [ + 0.595, + 0.073, + 0.913, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.106, + 0.206, + 0.119 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.122, + 0.484, + 0.184 + ], + "angle": 0, + "content": "[1] 2021. IEEE Standard for Information Technology-Telecommunications and Information Exchange between Systems Local and Metropolitan Area Networks-Specific Requirements Part 11: Wireless LAN Medium Access Control (MAC) and Physical Layer (PHY) Specifications Amendment 1: Enhancements for High-Efficiency WLAN. IEEE Std 802.11ax-2021 (Amendment to IEEE Std 802.11-2020) (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.183, + 0.482, + 0.224 + ], + "angle": 0, + "content": "[2] Afaz Uddin Ahmed, Reza Arablouei, Frank De Hoog, Branislav Kusy, and Raja Jurdak. 2019. Multi-radio Data Fusion for Indoor Localization Using Bluetooth and WiFi. In Proceedings of the 9th International Conference on Pervasive and Embedded Computing and Communication Systems: Volume 1: PECC." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.224, + 0.482, + 0.263 + ], + "angle": 0, + "content": "[3] Amazon. 2023. Amazon NETGEAR 4-Stream WiFi 6 Router. https://www.amazon.com/NETGEAR-4-Stream-WiFi-Router-R6700AX/dp/B08KTXG8Q5/ref=sr_1_5?keywords=wifi+router&qid=1687784198&sr=8-5. (2023). Accessed: 2023-06-26." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.264, + 0.482, + 0.303 + ], + "angle": 0, + "content": "[4] Amazon. 2023. Amazon Tenda AC1200 Smart WiFi Router. https://www.amazon.com/Tenda-Wireless-Internet-MU-MIMO-AC6/dp/B06X1CHFJ5/ref=sr_1_51?keywords=wifi+router&qid=1687784310&sr=8-51. (2023). Accessed: 2023-06-26."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.304, + 0.482, + 0.343 + ], + "angle": 0, + "content": "[5] Amazon. 2023. Amazon TP-Link AC1200 WiFi Router. https://www.amazon.com/TP-Link-AC1200-Router-Archer-A54/dp/B09G5Y1HWZ/ref=sr_1_1?keywords=wifi+router&qid=1687784198&sr=8-1. (2023). Accessed: 2023-06-26." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.343, + 0.482, + 0.383 + ], + "angle": 0, + "content": "[6] Amazon. 2023. Amazon TP-Link Smart WiFi 6 Router. https://www.amazon.com/TP-Link-Wireless-AX1500-Wifi-Router/dp/B07ZSDR49S/ref=sr_1_3?keywords=wifi+router&qid=1687784198&sr=8-3. (2023). Accessed: 2023-06-26." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.383, + 0.482, + 0.434 + ], + "angle": 0, + "content": "[7] Roshan Ayyalasomayajula, Aditya Arun, Chenfeng Wu, Sanatan Sharma, Abhishek Rajkumar Sethi, Deepak Vasisht, and Dinesh Bharadia. 2020. Deep Learning Based Wireless Localization for Indoor Navigation. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.435, + 0.482, + 0.475 + ], + "angle": 0, + "content": "[8] Kang Min Bae, Hankyeol Moon, Sung-Min Sohn, and Song Min Kim. 2023. Hawkeye: Hectometer-range Subcentimeter Localization for Large-scale mmWave Backscatter. In Proceedings of the 21st Annual International Conference on Mobile Systems, Applications and Services (MobiSys)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.476, + 0.482, + 0.515 + ], + "angle": 0, + "content": "[9] Atul Bansal, Akshay Gadre, Vaibhav Singh, Anthony Rowe, Bob Iannucci, and Swarun Kumar. 2021. Owll: Accurate LoRa Localization Using the TV Whitespaces. In Proceedings of the 20th International Conference on Information Processing in Sensor Networks (IPSN)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.515, + 0.482, + 0.546 + ], + "angle": 0, + "content": "[10] Yuanxi Cao and Sen Yan. 2021. A Low-profile High-gain Multi-beam Antenna based on 3D-printed Cylindrical Luneburg Lens. Microwave and Optical Technology Letters 63, 7 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.546, + 0.482, + 0.577 + ], + "angle": 0, + "content": "[11] Yuanxi Cao and Sen Yan. 2021. Multi-beam SIW Leaky-wave Antenna with 2-D Beam Scanning Capability for Millimeter-wave Radar Applications. International Journal of RF and Microwave Computer-aided Engineering 31, 5 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.577, + 0.482, + 0.607 + ], + "angle": 0, + "content": "[12] Yuanxi Cao, Sen Yan, and Juan Chen. 2023. An SIW Pillbox-based Compact Dual-polarized Multibeam Antenna with Passive 2-D Beam Scanning Capability. IEEE Transactions on Circuits and Systems II: Express Briefs 70, 1 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.608, + 0.482, + 0.638 + ], + "angle": 0, + "content": "[13] Yuanxi Cao, Sen Yan, Wendong Liu, and Jianxing Li. 2023. A Wideband Multibeam Pillbox Antenna Based on Differentially Fed Leaky-wave Array. IEEE Antennas and Wireless Propagation Letters 22, 3 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.638, + 0.482, + 0.668 + ], + "angle": 0, + "content": "[14] Roberto Carvalho, Shan-Ho Yang, Yao-Hua Ho, and Ling-Jyh Chen. [n.d.]. Indoor Localization Using FM and DVB-T Signals. In Proceedings of the 2016 13th IEEE Annual Consumer Communications & Networking Conference (CCNC)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.668, + 0.482, + 0.709 + ], + "angle": 0, + "content": "[15] Lili Chen, Wenjun Hu, Kyle Jamieson, Xiaojiang Chen, Dingyi Fang, and Jeremy Gummeson. [n. d.]. Pushing the Physical Limits of IoT Devices with Programmable Metasurfaces. In Proceedings of the 18th USENIX Symposium on Networked Systems Design and Implementation (NSDI)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.709, + 0.482, + 0.739 + ], + "angle": 0, + "content": "[16] Zhe Chen, Guorong Zhu, Sulei Wang, Yuedong Xu, Jie Xiong, Jin Zhao, Jun Luo, and Xin Wang. 2019. \\(M^3\\): Multipath Assisted Wi-Fi Localization with a Single Access Point. IEEE Transactions on Mobile Computing 20, 2 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.739, + 0.482, + 0.759 + ], + "angle": 0, + "content": "[17] COMFAST. 2023. CF-AX210 PRO. http://www.comfast.com.cn/index.php?m=content&c=index&a=show&catid=13&id=123. (2023). Accessed: 2023-03-17." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.759, + 0.482, + 0.79 + ], + "angle": 0, + "content": "[18] Pei Du and Nirupama Bulusu. 2021. An Automated AR-based Annotation Tool for Indoor Navigation for Visually Impaired People. In Proceedings of the 23rd International ACM SIGACCESS Conference on Computers and Accessibility." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.79, + 0.482, + 0.82 + ], + "angle": 0, + "content": "[19] Pei Du and Nirupama Bulusu. 2022. Indoor Navigation for Visually Impaired People with Vertex Colored Graphs. In Proceedings of the 20th Annual International Conference on Mobile Systems, Applications and Services (MobiSys)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.82, + 0.482, + 0.851 + ], + "angle": 0, + "content": "[20] Yasaman Ghasempour, Rabi Shrestha, Aaron Charous, Edward Knightly, and Daniel M Mittleman. 2020. Single-shot Link Discovery for Terahertz Wireless Networks. Nature Communications 11, 1 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.851, + 0.482, + 0.892 + ], + "angle": 0, + "content": "[21] Yasaman Ghasempour, Chia-Yi Yeh, Rabi Shrestha, Yasith Amarasinghe, Daniel Mittleman, and Edward W. Knightly. 2020. LeakyTrack: Non-coherent Single-antenna Nodal and Environmental Mobility Tracking with a Leaky-wave Antenna. In Proceedings of the 18th Conference on Embedded Networked Sensor Systems" + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.122, + 0.484, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.11, + 0.588, + 0.12 + ], + "angle": 0, + "content": "(SenSys)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.12, + 0.913, + 0.16 + ], + "angle": 0, + "content": "[22] Yasaman Ghasempour, Chia-Yi Yeh, Rabi Shrestha, Daniel Mittleman, and Edward Knightly. 2020. Single Shot Single Antenna Path Discovery in THz Networks. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.16, + 0.913, + 0.19 + ], + "angle": 0, + "content": "[23] Jon Gjengset, Jie Xiong, Graeme McPhillips, and Kyle Jamieson. 2014. Accurate Indoor Localization with Zero Start-up Cost. In Proceedings of the 20th Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.19, + 0.913, + 0.231 + ], + "angle": 0, + "content": "[24] Jon Gjengset, Jie Xiong, Graeme McPhillips, and Kyle Jamieson. 2014. 
Phaser: Enabling Phased Array Signal Processing on Commodity WiFi Access Points. In Proceedings of the 20th Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.231, + 0.913, + 0.272 + ], + "angle": 0, + "content": "[25] Baoshen Guo, Weijian Zuo, Shuai Wang, Wenjun Lyu, Zhiqing Hong, Yi Ding, Tian He, and Desheng Zhang. 2022. Wepos: Weak-supervised Indoor Positioning with Unlabeled WiFi for On-demand Delivery. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 6, 2 (2022), 1-25." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.272, + 0.913, + 0.312 + ], + "angle": 0, + "content": "[26] Xiuzhen Guo, Yuan He, Zihao Yu, Jiacheng Zhang, Yunhao Liu, and Longfei Shangguan. 2022. RF-transformer: A Unified Backscatter Radio Hardware Abstraction. In Proceedings of the 28th Annual International Conference on Mobile Computing And Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.312, + 0.913, + 0.352 + ], + "angle": 0, + "content": "[27] Xiuzhen Guo, Yuan He, Xiaolong Zheng, Liangcheng Yu, and Omprakash Gnawali. 2018. ZigFi: Harnessing Channel State Information for Cross-Technology Communication. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.352, + 0.913, + 0.383 + ], + "angle": 0, + "content": "[28] Xiuzhen Guo, Yuan He, Xiaolong Zheng, Liangcheng Yu, and Omprakash Gnawali. 2020. ZigFi: Harnessing Channel State Information for Cross-Technology Communication. IEEE/ACM Transactions on Networking 28, 1 (2020), 301–311." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.383, + 0.913, + 0.422 + ], + "angle": 0, + "content": "[29] Xiuzhen Guo, Longfei Shangguan, Yuan He, Nan Jing, Jiacheng Zhang, Haotian Jiang, and Yunhao Liu. 2022. Saiyan: Design and Implementation of a Low-power Demodulator for LoRa Backscatter Systems. In Proceedings of the 19th USENIX Symposium on Networked Systems Design and Implementation (NSDI)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.422, + 0.913, + 0.463 + ], + "angle": 0, + "content": "[30] Xiuzhen Guo, Longfei Shangguan, Yuan He, Jia Zhang, Haotian Jiang, Awais Ahmad Siddiqi, and Yunhao Liu. 2020. Aloba: Rethinking ON-OFF Keying Modulation for Ambient LoRa Backscatter. In Proceedings of the 18th Conference on Embedded Networked Sensor Systems (SenSys)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.463, + 0.913, + 0.503 + ], + "angle": 0, + "content": "[31] Xiuzhen Guo, Longfei Shangguan, Yuan He, Jia Zhang, Haotian Jiang, Awais Ahmad Siddiqi, and Yunhao Liu. 2021. Efficient Ambient LoRa Backscatter with On-Off Keying Modulation. IEEE/ACM Transactions on Networking 30, 2 (2021), 641-654." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.503, + 0.913, + 0.534 + ], + "angle": 0, + "content": "[32] Yuan He, Weiguo Wang, Luca Mottola, Shuai Li, Yimiao Sun, Jinming Li, Hua Jing, Ting Wang, and Yulei Wang. 2023. Acoustic Localization System for Precise Drone Landing. IEEE Transactions on Mobile Computing (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.534, + 0.913, + 0.553 + ], + "angle": 0, + "content": "[33] David R Jackson, Christophe Caloz, and Tatsuo Itoh. 2012. Leaky-wave Antennas. Proc. IEEE 100, 7 (2012)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.553, + 0.913, + 0.583 + ], + "angle": 0, + "content": "[34] Chengkun Jiang, Yuan He, Songzhen Yang, Junchen Guo, and Yunhao Liu. 2019. 3D-OmniTrack: 3D Tracking with COTS RFID Systems. In Proceedings of the 18th International Conference on Information Processing in Sensor Networks (IPSN)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.583, + 0.913, + 0.614 + ], + "angle": 0, + "content": "[35] Chengkun Jiang, Yuan He, Xiaolong Zheng, and Yunhao Liu. 2018. Orientation-aware RFID Tracking with Centimeter-level Accuracy. In Proceedings of the 17th International Conference on Information Processing in Sensor Networks (IPSN)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.614, + 0.913, + 0.645 + ], + "angle": 0, + "content": "[36] Chengkun Jiang, Yuan He, Xiaolong Zheng, and Yunhao Liu. 2019. OmniTrack: Orientation-aware RFID Tracking with Centimeter-level Accuracy. IEEE Transactions on Mobile Computing 20, 2 (2019), 634-646." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.645, + 0.913, + 0.676 + ], + "angle": 0, + "content": "[37] Haotian Jiang, Jiacheng Zhang, Xiuzhen Guo, and Yuan He. 2021. Sense Me on the Ride: Accurate Mobile Sensing Over a LoRa Backscatter Channel. In Proceedings of the 19th ACM Conference on Embedded Networked Sensor Systems (SenSys)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.676, + 0.913, + 0.716 + ], + "angle": 0, + "content": "[38] Zhiping Jiang, Tom H. Luan, Xincheng Ren, Dongtao Lv, Han Hao, Jing Wang, Kun Zhao, Wei Xi, Yueshen Xu, and Rui Li. 2022. Eliminating the Barriers: Demystifying Wi-Fi Baseband Design and Introducing the PicoScenes Wi-Fi Sensing Platform. IEEE Internet of Things Journal 9, 6 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.716, + 0.913, + 0.747 + ], + "angle": 0, + "content": "[39] Meng Jin, Yuan He, Songzhen Yang, Yunhao Liu, Li Yan, and Yuji Sun. 2022. Versatile RFID-based Sensing: Model, Algorithm, and Applications. IEEE Transactions on Mobile Computing (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.747, + 0.913, + 0.778 + ], + "angle": 0, + "content": "[40] Meng Jin, Kexin Li, Xiaohua Tian, Xinbing Wang, and Chenghu Zhou. 2023. Fast, Fine-grained, and Robust Grouping of RFIDs. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.778, + 0.913, + 0.817 + ], + "angle": 0, + "content": "[41] Meng Jin, Shun Yao, Kexin Li, Xiaohua Tian, Xinbing Wang, Chenghu Zhou, and Xinde Cao. 2022. A Passive Eye-in-Hand\" Camera\" for Miniature Robots. In Proceedings of the 20th ACM Conference on Embedded Networked Sensor Systems (SenSys)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.817, + 0.913, + 0.858 + ], + "angle": 0, + "content": "[42] Atsutse Kludze, Rabi Shrestha, Chowdhury Miftah, Edward Knightly, Daniel Mittleman, and Yasaman Ghasempour. 2022. Quasi-optical 3D Localization Using Asymmetric Signatures above 100 GHz. In Proceedings of the 28th Annual International Conference on Mobile Computing And Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.858, + 0.913, + 0.888 + ], + "angle": 0, + "content": "[43] Manikanta Kotaru, Kiran Joshi, Dinesh Bharadia, and Sachin Katti. 2015. SpotFi: Decimeter Level Localization Using WiFi. In Proceedings of the 2015 ACM Conference on Special Interest Group on Data Communication (SIGCOMM)." 
+ }, + { + "type": "list", + "bbox": [ + 0.518, + 0.11, + 0.913, + 0.888 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.923, + 0.516, + 0.935 + ], + "angle": 0, + "content": "388" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.073, + 0.402, + 0.087 + ], + "angle": 0, + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + }, + { + "type": "header", + "bbox": [ + 0.806, + 0.073, + 0.913, + 0.086 + ], + "angle": 0, + "content": "Yimiao Sun, et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.109, + 0.484, + 0.15 + ], + "angle": 0, + "content": "[44] Vikram Kumar, Reza Arablouei, Raja Jurdak, Branislav Kusy, and Neil W Bergmann. 2017. RSSI-based Self-localization with Perturbed Anchor Positions. In Proceedings of the 2017 IEEE 28th Annual International Symposium on Personal, Indoor, and Mobile Radio Communications (PIMRC)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.15, + 0.483, + 0.18 + ], + "angle": 0, + "content": "[45] L-com. 2023. Circular Polarized Patch Antenna. https://www.l-com.com/wireless-antenna-24-ghz-8-dbi-circular-polarized-rh-flat-patch-antennas. (2023). Accessed: 2023-10-03." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.18, + 0.483, + 0.21 + ], + "angle": 0, + "content": "[46] Danyang Li, Jingao Xu, Zheng Yang, Chenshu Wu, Jianbo Li, and Nicholas D Lane. 2021. Wireless Localization with Spatial-temporal Robust Fingerprints. ACM Transactions on Sensor Networks 18, 1 (2021), 1-23." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.21, + 0.482, + 0.241 + ], + "angle": 0, + "content": "[47] Tianxiang Li, Haofan Lu, Reza Rezvani, Ali Abedi, and Omid Abari. 2022. Bringing WiFi Localization to Any WiFi Devices. In Proceedings of the 21st ACM Workshop on Hot Topics in Networks (HotNets)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.241, + 0.483, + 0.282 + ], + "angle": 0, + "content": "[48] Tianxiang Li, Mohammad Hossein Mazaheri, and Omid Abari. 2022. 5g in the Sky: The Future of High-speed Internet via Unmanned Aerial Vehicles. In Proceedings of the 23rd Annual International Workshop on Mobile Computing Systems and Applications." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.282, + 0.482, + 0.323 + ], + "angle": 0, + "content": "[49] Xiang Li, Daqing Zhang, Qin Lv, Jie Xiong, Shengjie Li, Yue Zhang, and Hong Mei. 2017. IndoTrack: DeviceFree Indoor Human Tracking with Commodity Wi-Fi. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 3 (2017)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.322, + 0.483, + 0.373 + ], + "angle": 0, + "content": "[50] Bo Liang, Purui Wang, Renjie Zhao, Heyu Guo, Pengyu Zhang, Junchen Guo, Shunmin Zhu, Hongqiang Harry Liu, Xinyu Zhang, and Chenren Xu. 2023. RF-Chord: Towards Deployable RFID Localization System for Logistic Networks. In Proceedings of the 20th USENIX Symposium on Networked Systems Design and Implementation (NSDI)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.372, + 0.482, + 0.402 + ], + "angle": 0, + "content": "[51] Mohammad Hossein Mazaheri, Alex Chen, and Omid Abari. 2021. mmTag: A Millimeter Wave Backscatter Network. In Proceedings of the 2021 ACM SIGCOMM 2021 Conference (SIGCOMM)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.402, + 0.482, + 0.432 + ], + "angle": 0, + "content": "[52] Francesco Monticone and Andrea Alu. 2015. Leaky-wave Theory, Techniques, and Applications: From Microwaves to Visible Frequencies. Proc. 
IEEE 103, 5 (2015)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.432, + 0.483, + 0.472 + ], + "angle": 0, + "content": "[53] Xin Na, Xiuzhen Guo, Zihao Yu, Jia Zhang, Yuan He, and Yunhao Liu. 2023. Leggiero: Analog WiFi Backscatter with Payload Transparency. In Proceedings of the 21st Annual International Conference on Mobile Systems, Applications and Services (MobiSys)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.472, + 0.482, + 0.522 + ], + "angle": 0, + "content": "[54] Sujay Narayana, Vijay Rao, R Venkatesha Prasad, Ajay K Kanthila, Kavya Managundi, Luca Mottola, and T Venkata Prabhakar. 2020. LOCI: Privacy-aware, Device-free, Low-power Localization of Multiple Persons Using IR Sensors. In Proceedings of the 19th International Conference on Information Processing in Sensor Networks (IPSN)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.522, + 0.482, + 0.553 + ], + "angle": 0, + "content": "[55] John Nolan, Kun Qian, and Xinyu Zhang. 2021. RoS: Passive Smart Surface for Roadside-to-Vehicle Communication. In Proceedings of the 2021 ACM SIGCOMM 2021 Conference (SIGCOMM)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.553, + 0.482, + 0.593 + ], + "angle": 0, + "content": "[56] Yuanchao Shu, Zhuqi Li, Borje Karlsson, Yiyong Lin, Thomas Moscibroda, and Kang Shin. [n. d.]. incrementally-deployable Indoor Navigation with Automatic Trace Generation. In Proceedings of IEEE International Conference on Computer Communications." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.593, + 0.482, + 0.634 + ], + "angle": 0, + "content": "[57] Elahe Soltanaghaei, Avinash Kalyanaraman, and Kamin Whitehouse. 2018. Multipath Triangulation: Decimeter-level WiFi Localization and Orientation with a Single Unaided Receiver. In Proceedings of the 16th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.634, + 0.482, + 0.684 + ], + "angle": 0, + "content": "[58] Elahe Soltanaghaei, Akarsh Prabhakara, Artur Balanuta, Matthew Anderson, Jan M Rabaey, Swarun Kumar, and Anthony Rowe. 2021. Millimetre: mmWave Retro-reflective Tags for Accurate, Long Range Localization. In Proceedings of the 27th Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.684, + 0.482, + 0.724 + ], + "angle": 0, + "content": "[59] Yimiao Sun, Weiguo Wang, Luca Mottola, Ruijin Wang, and Yuan He. 2022. AIM: Acoustic Inertial Measurement for Indoor Drone Localization and Tracking. In Proceedings of the 20th Conference on Embedded Networked Sensor Systems (SenSys)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.724, + 0.482, + 0.765 + ], + "angle": 0, + "content": "[60] Huy Tran, Abhishek Mukherji, Nirupama Bulusu, Santosh Pandey, and Xu Zhang. 2019. Improving Infrastructure-based Indoor Positioning Systems with Device Motion Detection. In Proceedings of the 2019 IEEE International Conference on Pervasive Computing and Communications (PerCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.765, + 0.482, + 0.806 + ], + "angle": 0, + "content": "[61] Ju Wang, Hongbo Jiang, Jie Xiong, Kyle Jamieson, Xiaojiang Chen, Dingyi Fang, and Binbin Xie. 2016. LiFS: Low Human-effort, Device-free Localization with Fine-grained Subcarrier Information. In Proceedings of the 22nd Annual International Conference on Mobile Computing and Networking (MobiCom)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.806, + 0.482, + 0.845 + ], + "angle": 0, + "content": "[62] Weiguo Wang, Yuan He, Meng Jin, Yimiao Sun, and Xiuzhen Guo. 2023. Meta-Speaker: Acoustic Source Projection by Exploiting Air Nonlinearity. In Proceedings of the 29st Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.845, + 0.482, + 0.866 + ], + "angle": 0, + "content": "[63] Weiguo Wang, Luca Mottola, Yuan He, Jinming Li, Yimiao Sun, Shuai Li, Hua Jing, and Yulei Wang. 2022. MicNest: Long-range Instant Acoustic Localization" + }, + { + "type": "list", + "bbox": [ + 0.086, + 0.109, + 0.484, + 0.866 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.109, + 0.913, + 0.13 + ], + "angle": 0, + "content": "of Drones in Precise Landing. In Proceedings of the 20th Conference on Embedded Networked Sensor Systems (SenSys)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.13, + 0.913, + 0.16 + ], + "angle": 0, + "content": "[64] Yongyong Wei and Rong Zheng. 2020. Handling Device Heterogeneity in Wi-Fi Based Indoor Positioning Systems. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.16, + 0.913, + 0.181 + ], + "angle": 0, + "content": "[65] Yongyong Wei and Rong Zheng. 2021. Efficient Wi-Fi Fingerprint Crowdsourcing for Indoor Localization. IEEE Sensors Journal 22, 6 (2021), 5055-5062." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.181, + 0.913, + 0.222 + ], + "angle": 0, + "content": "[66] Chenshu Wu, Jingao Xu, Zheng Yang, Nicholas D Lane, and Zuwei Yin. 2017. Gain without Pain: Accurate WiFi-based Localization Using Fingerprint Spatial Gradient. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 2 (2017), 1-19." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.222, + 0.913, + 0.252 + ], + "angle": 0, + "content": "[67] Chenshu Wu, Zheng Yang, Zimu Zhou, Yunhao Liu, and Mingyan Liu. 2016. Mitigating Large Errors in WiFi-based Indoor Localization for Smartphones. IEEE Transactions on Vehicular Technology 66, 7 (2016), 6246-6257." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.252, + 0.913, + 0.282 + ], + "angle": 0, + "content": "[68] Yaxiong Xie, Jie Xiong, Mo Li, and Kyle Jamieson. 2019. md-Track: Leveraging Multi-dimensionality for Passive Indoor Wi-Fi Tracking. In Proceedings of the 25th Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.282, + 0.913, + 0.313 + ], + "angle": 0, + "content": "[69] Jie Xiong and Kyle Jamieson. 2013. ArrayTrack: A Fine-grained Indoor Location System. In Proceedings of the 10th USENIX Symposium on Networked Systems Design and Implementation (NSDI)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.313, + 0.913, + 0.353 + ], + "angle": 0, + "content": "[70] Jie Xiong, Karthikeyan Sundaresan, and Kyle Jamieson. 2015. ToneTrack: Leveraging Frequency-agile Radios for Time-based Indoor Wireless Localization. In Proceedings of the 21st Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.353, + 0.913, + 0.373 + ], + "angle": 0, + "content": "[71] Feng Xu and Ke Wu. 2013. Understanding Leaky-wave Structures: A Special Form of Guided-wave Structure. IEEE Microwave Magazine 14, 5 (2013)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.373, + 0.913, + 0.404 + ], + "angle": 0, + "content": "[72] Han Xu, Zheng Yang, Zimu Zhou, Ke Yi, and Chunyi Peng. [n. d]. Tum: Towards Ubiquitous Multi-device Localization for Cross-device Interaction. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.404, + 0.913, + 0.445 + ], + "angle": 0, + "content": "[73] Kun Yang, Xiaolong Zheng, Jie Xiong, Liang Liu, and Huadong Ma. 2022. Wilmg: Pushing the Limit of WiFi Sensing with Low Transmission Rates. In Proceedings of the 19th Annual IEEE International Conference on Sensing, Communication, and Networking (SECON)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.445, + 0.913, + 0.485 + ], + "angle": 0, + "content": "[74] Yu Yang, Yi Ding, Dengpan Yuan, Guang Wang, Xiaoyang Xie, Yunhuai Liu, Tian He, and Desheng Zhang. 2020. Transloc: Transparent Indoor Localization with Uncertain Human Participation for Instant Delivery. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.484, + 0.913, + 0.505 + ], + "angle": 0, + "content": "[75] Zheng Yang, Zimu Zhou, and Yunhao Liu. 2013. From RSSI to CSI: Indoor Localization via Channel Response. Comput. Surveys 46, 2 (2013)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.505, + 0.913, + 0.545 + ], + "angle": 0, + "content": "[76] Chia-Yi Yeh, Yasaman Ghasempour, Yasith Amarasinghe, Daniel M Mittleman, and Edward W Knightly. 2020. Security in Terahertz WLANs with Leaky Wave Antennas. In Proceedings of the 13th ACM Conference on Security and Privacy in Wireless and Mobile Networks (WiSec)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.545, + 0.913, + 0.584 + ], + "angle": 0, + "content": "[77] Diana Zhang, Jingxian Wang, Junsu Jang, Junbo Zhang, and Swarun Kumar. 2019. On the Feasibility of Wi-Fi Based Material Sensing. In Proceedings of the 25st Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.584, + 0.913, + 0.615 + ], + "angle": 0, + "content": "[78] Jia Zhang, Xiuzhen Guo, Haotian Jiang, Xiaolong Zheng, and Yuan He. 2020. Link Quality Estimation of Cross-technology Communication. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM). 496-505." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.615, + 0.913, + 0.656 + ], + "angle": 0, + "content": "[79] Jia Zhang, Xin Na, Rui Xi, Yimiao Sun, and Yuan He. 2023. mmHawkeye: Passive UAV Detection with a COTS mmWave Radar. In Proceedings of the 20th Annual IEEE International Conference on Sensing, Communication, and Networking (SECON)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.656, + 0.913, + 0.697 + ], + "angle": 0, + "content": "[80] Jia Zhang, Rui Xi, Yuan He, Yimiao Sun, Xiuzhen Guo, Weiguo Wang, Xin Na, Yunhao Liu, Zhenguo Shi, and Tao Gu. 2023. A Survey of mmWave-based Human Sensing: Technology, Platforms and Applications. IEEE Communications Surveys & Tutorials (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.697, + 0.913, + 0.736 + ], + "angle": 0, + "content": "[81] Xianan Zhang, Wei Wang, Xuedou Xiao, Hang Yang, Xinyu Zhang, and Tao Jiang. 2020. Peer-to-Peer Localization for Single-antenna Devices. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 4, 3 (2020), 1-25." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.736, + 0.913, + 0.767 + ], + "angle": 0, + "content": "[82] Zhenyong Zhang, Shibo He, Yuanchao Shu, and Zhiguo Shi. 2019. A Self-evolving WiFi-based Indoor Navigation System Using Smartphones. IEEE Transactions on Mobile Computing 19, 8 (2019), 1760-1774." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.767, + 0.913, + 0.798 + ], + "angle": 0, + "content": "[83] Tianyue Zheng, Zhe Chen, Jun Luo, Lin Ke, Chaoyang Zhao, and Yaowen Yang 2021. SiWa: See into Walls via Deep UWB Radar. In Proceedings of the 27th Annual International Conference on Mobile Computing and Networking (MobiCom)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.798, + 0.913, + 0.838 + ], + "angle": 0, + "content": "[84] Xiaolong Zheng, Jiliang Wang, Longfei Shangguan, Zimu Zhou, and Yunhao Liu 2016. Smokey: Ubiquitous Smoking Detection with Commercial WiFi Infrastructures. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM)." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.109, + 0.913, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.923, + 0.515, + 0.935 + ], + "angle": 0, + "content": "389" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_origin.pdf b/data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9c9289aec4d0fc8c362fb6431f6a0a6f742d8ecf --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fb9b5eaee0b88055b795c9edd8505f3a9f40a5a2b4982b7cb34b2aed6a6989d +size 2058368 diff --git a/data/2025/2504_06xxx/2504.06311/full.md b/data/2025/2504_06xxx/2504.06311/full.md new file mode 100644 index 0000000000000000000000000000000000000000..6158063e1d9aaf96051a7bf9a5b8bf71df3588e2 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/full.md @@ -0,0 +1,598 @@ +# Bifrost: Reinventing WiFi Signals Based on Dispersion Effect for Accurate Indoor Localization + +Yimiao Sun, Yuan He*, Jiacheng Zhang, Xin Na, Yande Chen, Weiguo Wang, Xiuzhen Guo + +School of Software and BNrist, Tsinghua University + +sym21@mails.tsinghua.edu.cn, heyuan@tsinghua.edu.cn + +{zhangjc21,nx20,cyd22,wwg18}@mails.tsinghua.edu.cn, guoxiuzhen94@gmail.com + +# ABSTRACT + +WiFi-based device localization is a key enabling technology for smart applications, which has attracted numerous research studies in the past decade. Most of the existing approaches rely on Line-of-Sight (LoS) signals to work, while a critical problem is often neglected: In the real-world indoor environments, WiFi signals are everywhere, but very few of them are usable for accurate localization. As a result, the localization accuracy in practice is far from being satisfactory. This paper presents Bifrost, a novel hardware-software co-design for accurate indoor localization. The core idea of Bifrost is to reinvent WiFi signals, so as to provide sufficient LoS signals for localization. This is realized by exploiting the dispersion effect of signals emitted by the leaky wave antenna (LWA). We present a low-cost plug-in design of LWA that can generate orthogonal polarized signals: On one hand, LWA disperses signals of different frequencies to different angles, thus providing Angle-of-Arrival (AoA) information for the localized target. 
On the other hand, the target further leverages the antenna polarization mismatch to distinguish AoAs from different LWAs. In the software layer, fine-grained information in Channel State Information (CSI) is exploited to cope with multipath and noise. We implement Bifrost and evaluate its performance under various settings. The results show that the median localization error of Bifrost is $0.81\mathrm{m}$, which is $52.35\%$ less than that of SpotFi, a state-of-the-art approach. SpotFi, when combined with Bifrost to work in the realistic settings, can reduce the localization error by $33.54\%$.

# CCS CONCEPTS

- Networks $\rightarrow$ Location based services;
- Information systems $\rightarrow$ Location based services;

# KEYWORDS

WiFi Localization, Indoor Localization, Leaky Wave Antenna, RF Computing

$^{\dagger}$ Yuan He is the corresponding author.

![](images/908672f03a05015deaed74a1844456cfe761d014f09263752aad0d47f951d0ba.jpg)

This work is licensed under a Creative Commons Attribution International 4.0 License.

SenSys '23, November 12-17, 2023, Istanbul, Türkiye

© 2023 Copyright held by the owner/author(s).

ACM ISBN 979-8-4007-0414-7/23/11...$15.00

https://doi.org/10.1145/3625687.3625786

![](images/5288f746a3d9c59551d74b46534fb59e7bfd78a7e34567f04627a892543b58df.jpg)
(a)

![](images/3ff4fad4b6922ac9b775808d80c2e52e64e0bdd0222238fb46302e9792d486a7.jpg)
(b)
Figure 1: A model-driven method works well when (a) sufficient LoS signals are available but becomes inaccurate when (b) NLoS signals have to be used.

# ACM Reference Format:

Yimiao Sun, Yuan He, Jiacheng Zhang, Xin Na, Yande Chen, Weiguo Wang, Xiuzhen Guo. 2023. Bifrost: Reinventing WiFi Signals Based on Dispersion Effect for Accurate Indoor Localization. In The 21st ACM Conference on Embedded Networked Sensor Systems (SenSys '23), November 12-17, 2023, Istanbul, Türkiye. ACM, New York, NY, USA, 14 pages. https://doi.org/10.1145/3625687.3625786

# 1 INTRODUCTION

Location information [32, 63, 79, 80] is crucial, especially for smart indoor applications [50, 60, 67, 72], such as smart home [54, 62] and indoor navigation [7, 18, 19, 59]. Due to the ubiquitous deployment of WiFi access points (APs) and the wide availability of WiFi modules on devices, WiFi-based localization [16, 25, 49, 56, 57, 61, 64, 65, 68-70, 73, 74, 82] appears to be promising for indoor localization. The existing works on WiFi-based indoor localization can be broadly grouped into two categories: data-driven methods and model-driven methods.

Data-driven methods are typically represented by fingerprinting [14, 44, 61]. These methods need to collect Received Signal Strength (RSS) or CSI at different places to construct a database mapping RSS (or CSI) to locations, which is a labor-intensive process. Also, their performance may be vulnerable to dynamic environments.

![](images/dd3fffaab64c2aa57f33b3095c34259cf6579acc3062073b260c8ec1623d4fbf.jpg)
(a) Library (48 rooms)

![](images/e49e4eeeefff0ee45a136194aac6af3b509d72117db7264bff2f5e731f512942.jpg)
(b) Office (54 rooms)
Figure 2: The number of LoS APs in each room in a library and an office building.

Model-driven methods induce less labor cost and attract more research studies. Generally, a model-driven method calculates the location by estimating signals' Angle-of-Arrival (AoA) [2, 23, 24, 69], Time-of-Flight (ToF) [70, 81] or both [9, 16, 43]. Most of the existing approaches rely on Line-of-Sight (LoS) signals to work, as Fig.
1(a) illustrates, while a critical problem is often neglected: In the real-world indoor environments, WiFi signals are everywhere, but very few of them are usable for accurate localization. As an example to validate this finding, Fig. 2 plots the statistics of the real deployment of WiFi APs in a library (48 rooms) and an office building (54 rooms). The data shows that in nearly half of all the rooms, there is not even one LoS AP available. The rooms with sufficient LoS signals account for less than $5\%$ of all the rooms. In other words, the chance for a WiFi device to receive sufficient LoS WiFi AP signals, namely the case for it to be accurately localized by using an existing approach, is less than $5\%$. That explains why the practical performance of the existing localization approaches is far from being satisfactory.

A straightforward idea to address the above problem is to increase the number of deployed WiFi APs, until everywhere is covered by at least 3 LoS APs. It isn't practical, however. Taking the library and office building investigated in Fig. 2 as an example, typically there are 50 rooms in a building. Covering every room with 3 APs requires 150 APs to be deployed, which brings multiple drawbacks, such as substantial deployment cost of cables (connecting the APs), an overly crowded wireless spectrum, and frequent interference and collisions in the wireless communication.

This paper presents a novel approach called Bifrost, a plug-and-play and cost-effective scheme to significantly enhance the availability of LoS WiFi signals and in turn the localization accuracy. In light of the research progress on leaky wave antenna (LWA) in recent years [21, 22, 42, 47, 48, 76], Bifrost exploits the dispersion effect of wireless signals [33]. Deployed in the space covered by WiFi signals, a LWA can receive those signals and then radiate them at different frequencies towards different directions, exhibiting frequency and spatial division multiplexing (FSDM) features, effectively reinventing WiFi signals.

![](images/823419a8c889392fc2b02cd1e4c1b440b94537a0158211bb0cbf7bc569ae4ec1.jpg)
Figure 3: The high-level principle of Bifrost.

Fig. 3 illustrates the high-level principle of Bifrost. To localize a target device, Bifrost uses two LWAs to transform WiFi signals into FSDM signals, so the target device will receive two LoS FSDM signals with a unique pair of frequencies. Since the frequency and the propagation direction of FSDM signals are coupled, the target device can estimate its AoAs to both LWAs by analyzing the received spectrum and then calculate its location.

Compared with using WiFi APs, using LWA to assist localization offers the following two distinct advantages:

1) Cost-effective. The cost of a LWA in Bifrost is 7.41 USD (4.36 USD for the material cost and 3.05 USD for the control module), which is significantly lower than that of a WiFi AP (typically $30 \sim 100$ USD [3-6]).
2) Easy to Use. Deploying a LWA is very convenient. It can operate in a plug-and-play manner without the need for connecting power cables.

Leveraging these two advantages, Bifrost can be easily implemented in any environment with WiFi coverage, no matter whether the WiFi signals are LoS or not. Bifrost can either work independently, or cooperatively with other conventional WiFi-based localization methods.

The design of Bifrost tackles several critical challenges, which are summarized as follows:

Ambiguity between Different LWAs. As Fig.
3 shows, a target device may receive signals from two LWAs, which are reinvented from the same WiFi signal source. Without a special design, it is almost impossible for the target to distinguish one LWA from the other. To overcome this problem, the LWAs in Bifrost are designed to generate orthogonal circular polarized (CP) signals, so that they won't mix up with each other (§3.1). Polarization of LWA signals can be conveniently switched by altering the input port of WiFi signals, without the need for reconstruction or modifications to the LWA's structure.

![](images/2109f2078048336a2c4a81a265742db1ac720ab5b8a945f966c7e74d9179e936.jpg)
(a) Linear polarization (LP).

![](images/5bb17e241be2f79ada098e07caca36cc7af2ffba3ddb0c74fca93996c1870e12.jpg)
(b) Circular polarization (CP).

![](images/bbed68b7c7c2a6dffa3e458e459a9cd13deffd3c1a4950f2c36f70d280dc1402.jpg)
(c) Elliptical polarization (EP).

![](images/de065f1018fd63684ca76984649d3bbde8bb74dc9f72d0dd5cb8341546d23646.jpg)
(d) CP signal synthesis.
Figure 4: The properties of polarized electromagnetic waves.

Signal Extraction from the Interfered Frequency Band. Since FSDM signals radiated by LWAs are transformed from existing WiFi signals, the two types of signals operate within the same frequency band and can be simultaneously received by a target device. Directly using such signals leads to erroneous AoA estimation. To deal with such interference, LWAs in Bifrost work in a duty-cycled manner. The target device is able to detect distinctive variation of the signal amplitude at the frequencies of FSDM signals (§3.3). By analyzing WiFi CSI, the target device can effectively extract the desired FSDM signals from the interfered frequency band.

Indoor Multipath Effect. The multipath effect in the indoor environment may seriously affect the quality of the received FSDM signals and further affect the localization accuracy. In order to identify FSDM signals propagating along the LoS path, Bifrost operates in two steps. First, we map frequencies of FSDM signals with subcarriers in CSI and cluster adjacent subcarriers to only retain the cluster with the highest energy (§3.4). Second, we take the intersection of two clusters (corresponding to the two orthogonal CP signals), and determine the final frequency by weighting the center frequency of the remaining clustered subcarriers (§3.5).

Our contributions can be summarized as follows:

1) We tackle a significant problem, namely the limited availability of LoS signals, which is overlooked by the existing works on WiFi-based indoor localization. We reinvent WiFi signals by exploiting the dispersion effect, which represents a new direction of utilizing LWAs.
2) We address a series of non-trivial challenges, such as signal ambiguity, interference, and multipath effect, etc. The design of Bifrost effectively ensures the quality of signals used for localization.
3) We implement Bifrost and evaluate its performance under various settings. The results show that the median localization error of Bifrost is $0.81\mathrm{m}$, which is $52.35\%$ less than that of SpotFi, a state-of-the-art approach. SpotFi, when combined with Bifrost to work in the realistic settings, can reduce the localization error by $33.54\%$.

This paper proceeds as follows: §2 introduces background knowledge on the signal polarization and the LWA. Then §3 unfolds the design of Bifrost in both hardware and software. The implementation and evaluation results are presented in §4.
We discuss practical issues in §5 and summarize related works in §6. This work is concluded in §7.

# 2 PRIMER

This section introduces preliminary knowledge of our work: polarization of wireless signals and leaky wave antenna.

# 2.1 Signal Polarization

Polarization is a fundamental property of wireless signals, including the FSDM and WiFi signals investigated in this work. It represents the direction of the signal's electric field, which can be denoted as $\vec{E}$ and can be decomposed into the horizontal component $\overrightarrow{E_x}$ and the vertical component $\overrightarrow{E_y}$. There will be a phase difference $\Delta \phi \in [0, \pi)$ between these two orthogonal components, leading to the following elliptic equation

$$
\left(\frac{\overrightarrow{E_x}}{E_{x0}}\right)^{2} + \left(\frac{\overrightarrow{E_y}}{E_{y0}}\right)^{2} - \frac{2\overrightarrow{E_x}\overrightarrow{E_y}}{E_{x0}E_{y0}}\cos(\Delta\phi) = \sin^{2}(\Delta\phi), \tag{1}
$$

where $E_{x0}$ and $E_{y0}$ are the amplitudes of $\overrightarrow{E_x}$ and $\overrightarrow{E_y}$. According to the value of $\Delta \phi$, the polarization of $\vec{E}$ can be divided into the following three categories:

When $\Delta \phi = 0$ or $\pi$: we have $\overrightarrow{E_y} = \pm\frac{E_{y0}}{E_{x0}}\overrightarrow{E_x}$, so the signal is linear polarized (LP), as shown in Fig. 4(a). The polarization direction hinges on $\pm \frac{E_{y0}}{E_{x0}}$, the ratio of $\overrightarrow{E_y}$ and $\overrightarrow{E_x}$.

When $\Delta \phi = \pm \frac{\pi}{2}$: we have $\overrightarrow{E_x}^{2} + \overrightarrow{E_y}^{2} = E_{0}^{2}$ (with $E_{x0} = E_{y0} = E_{0}$), and now the signal is circular polarized (CP), as Fig. 4(b) illustrates. Besides, Fig. 4(d) provides another perspective on how the CP signal is decomposed into two LP signals. Depending on whether $\Delta \phi$ is positive or negative, the rotation direction of the CP signal is in either left-hand circular polarization (LHCP) or right-hand circular polarization (RHCP), which are orthogonal and won't interfere with each other.

When $\Delta \phi$ takes other values: the signal is elliptical polarized (EP), as Fig. 4(c) depicts. Similar to the CP signal, the EP signal also can be divided into left-hand or right-hand.

Impact of Polarization on the Rx: The polarization of a signal accords with that of its transmitting antenna but may change during propagation. To ensure effective reception, it should match the polarization of the receiving antenna, at least partially. Fig. 5 illustrates how polarization mismatch affects the received signal strength (RSS).

For the LP signal and antenna, RSS decreases as the angle between these two polarization directions increases from $0^{\circ}$ to $90^{\circ}$. For the CP signal, the signal can be decomposed into two orthogonal LP

![](images/9c50c21f9f2753fbea75feba6489560ab1bb6bd4e497c4e119371cab5de559c0.jpg)
Figure 5: RSS variation according to the polarization of signals and Rx.

signals. Thus, the LP antenna can only receive the component whose polarization direction is parallel to itself and loses half of the signal energy. Similarly, the CP antenna can only receive half of the LP signal's energy. However, when an LHCP antenna is used to receive RHCP signals or vice versa, RSS is theoretically zero because these two polarizations are orthogonal. That is the reason why Bifrost can eliminate the ambiguity of two FSDM signals radiated from different LWAs.
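These mismatch rules are easy to check numerically. Below is a minimal sketch (our own illustration, not code from the paper) that models polarization states as Jones vectors and computes the fraction of incident power an antenna captures; the function names are ours.

```python
import numpy as np

def jones(kind, angle_deg=0.0):
    """Unit Jones vector of a polarization state.
    'lp': linear at angle_deg from horizontal; 'rhcp'/'lhcp': circular."""
    if kind == "lp":
        a = np.deg2rad(angle_deg)
        return np.array([np.cos(a), np.sin(a)], dtype=complex)
    if kind == "rhcp":
        return np.array([1, -1j]) / np.sqrt(2)
    if kind == "lhcp":
        return np.array([1, 1j]) / np.sqrt(2)
    raise ValueError(kind)

def power_fraction(rx, tx):
    """Fraction of incident power that antenna state rx captures from signal tx."""
    return abs(np.vdot(rx, tx)) ** 2

print(power_fraction(jones("lp", 0), jones("lp", 45)))  # 0.5, LP with 45 deg mismatch
print(power_fraction(jones("lp", 0), jones("rhcp")))    # 0.5, LP antenna on a CP signal
print(power_fraction(jones("lhcp"), jones("rhcp")))     # ~0, orthogonal CP states
print(power_fraction(jones("rhcp"), jones("rhcp")))     # 1.0, matched CP
```

The last two lines mirror the argument above: orthogonal circular polarizations reject each other regardless of antenna orientation, which is exactly the property Bifrost relies on.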
# 2.2 Leaky Wave Antenna

LWA belongs to the class of traveling-wave antennas, where the propagating wave inside the antenna structure can "leak" (i.e., radiate) from the waveguide to the free space, hence the name. It can distinctively couple the leaky wave's frequency and radiation direction to produce a frequency and spatial division multiplexing (FSDM) signal, as shown in Fig. 6. Specifically, the direction of the signal $\overrightarrow{E_f}$ with frequency $f$ can be determined by [71]:

$$
\theta(f) = \arccos\frac{\beta(f)}{k_{0}(f)}, \tag{2}
$$

where $\beta(f)$ and $k_{0}(f)$ are the phase constant along the LWA and the propagation constant in the free space w.r.t. $\overrightarrow{E_f}$ [52].

Currently, two main types of LWAs have been extensively studied. 1) The uniform LWA, which employs a metallic waveguide with a slit cut along its length [21, 22, 42, 76], as depicted in Fig. 6(b). The FSDM signal leaked from a uniform LWA can only propagate towards the forward region (i.e., $\theta \in (0^{\circ}, 90^{\circ})$). 2) The periodic LWA, which is typically designed using a dielectric substrate with a periodic array of metal strips (i.e., slots) [10-13], similar to an antenna array, as shown in Fig. 6(a). The FSDM signal of this type of LWA can propagate towards both forward and backward regions (i.e., $\theta \in (0^{\circ}, 180^{\circ})$) [33].

Periodic LWA has been widely studied in recent research due to its versatile slot design and low-cost fabrication using the printed

![](images/36b40804dcad7a772c81b3692c34777cfdd86cb7e5c8e87d5c00d280bc55e7a9.jpg)
Figure 6: Typical structures of leaky wave antenna.

circuit board (PCB) process. These attributes have made it a popular choice in various applications. Bifrost also employs the periodic structure to produce circular polarized signals.
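As a quick worked instance of Eq. (2), the sketch below shows how a normalized phase constant $\beta(f)/k_{0}(f)$ that grows with frequency steers the beam; the ratios are invented for illustration, since the real dispersion curve depends on the antenna structure.

```python
import numpy as np

# Eq. (2): theta(f) = arccos(beta(f) / k0(f)).
# The beta/k0 ratios are made-up illustrative values, not measurements.
for f_ghz, ratio in [(5.17, 0.60), (5.25, 0.70), (5.33, 0.80)]:
    theta = np.degrees(np.arccos(ratio))
    print(f"f = {f_ghz} GHz, beta/k0 = {ratio:.2f} -> theta = {theta:.1f} deg")
# f = 5.17 GHz -> 53.1 deg; f = 5.25 GHz -> 45.6 deg; f = 5.33 GHz -> 36.9 deg
```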
# 3 BIFROST

In this section, we first articulate how to design the circular polarized LWA (i.e., CPLWA) to transform the input LP signal into the CP signal with the FSDM feature. Then, we present details of our approach of localization with the CPLWA.

# 3.1 CPLWA Design

Unlike many traditional LWAs [10, 13, 21, 22], Bifrost utilizes CP (i.e., RHCP and LHCP) to distinguish different LWAs and corresponding FSDM signals. We specially design a CPLWA that can generate both LHCP and RHCP signals. As shown in Fig. 7(a), our CPLWA has both vertical and horizontal slots to generate orthogonal LP signals, and further to form the CP signal (the bifurcation is designed for performance optimization). According to Eq. (1), a $\frac{\pi}{2}$ phase difference between two LP signals is necessary to generate the CP signal, and this is achieved by adjusting the length of the slots. Denoting the guided wavelength of the substrate material at $5.25\mathrm{GHz}$ as $\lambda_{g}$, the distance between the centers of the horizontal and the vertical slots is $\frac{\lambda_g}{4}$.

In the fabrication process of CPLWA, we adopt a two-layer copper-clad substrate structure, as shown in Fig. 7(b). The substrate material is F4BM-2, whose permittivity $\epsilon = 3.02$. The top and bottom layers of the substrate consist of copper and have undergone tin immersion plating to prevent oxidation. The bottom layer of copper functions as the ground, and shorting vias are incorporated to penetrate the substrate, connecting the top and bottom layers in order to ground the top layer. These shorting vias are periodically arranged on the upper and lower boundaries of the substrate and the patch.

The final structure of our proposed CPLWA is depicted in Fig. 7(c), where multiple units are linearly arranged together to enhance the directivity of the FSDM signal, similar to an antenna array. Note that a CPLWA is composed of 6 units as an illustration, but 11 units are arranged in practice. This CPLWA features two ports on both ends: one is the feed port that connects to an LP antenna for absorbing the WiFi signal, and the other should connect to a matched $50\Omega$ load. By changing the signal feed port, polarization of the FSDM signal can switch between LHCP and RHCP. If the input signal has gone through all slots and reached the other end, yet still has energy remaining, the matched load will absorb the excess signal.

![](images/7d0df6daa0e60f943e48d51af1e118545c15659453e80866a132fc5dabc9f7dc.jpg)
(a) Unit of the LWA.

![](images/a4811d8b72e0b779962645388f118c8dadfe40b3817213814d85446162eba8f8.jpg)
(b) Layered structure.

![](images/77325214c9a5a697d9c27756c1cfc8c1669913385909b07abdad9bc4bfa1f232.jpg)
(c) Complete design.
Figure 7: General view of CPLWA used in Bifrost.

The CPLWA used in Bifrost is specially designed for the 5.17GHz-5.33GHz WiFi band, while this structure and design methodology are universally applicable to other frequencies and bandwidths by properly modifying the relevant parameters.

Now we conduct a quick validation to show the key performance of the proposed CPLWA using ANSYS HFSS. Firstly, the direction of the FSDM signal w.r.t. different frequencies is depicted in Fig. 8(a). There is a total $22^{\circ}$ field of view (FoV) across the operating frequency band (5.17GHz-5.33GHz). Note that when the LP signal is fed into the right port or left port, the RHCP or LHCP signal will be radiated from $22^{\circ}$ to $44^{\circ}$ or from $136^{\circ}$ to $158^{\circ}$, respectively. Fig. 8(b) shows the energy distribution of signals at five different frequencies. It is evident that the energy of the leaky signal concentrates on the correct direction, and their realized gains are all above 11.5dB. Therefore, the direction can be easily identified by examining the energy distribution of signals.

![](images/b24fbb91f95a7b3b3fd9b8a04a914b391287c260958fa8809ac3f9870b57a917.jpg)
(a) Main beam direction.

![](images/6fb82dec0dccfa3d8c1ed30bcc9878629f29b2090b1d4daa1b5f62c3ee02be85.jpg)
(b) Realized gain.
Figure 8: Key results of the CPLWA.

With the proposed CPLWA, we will proceed with elaborating on the core localization algorithm in Bifrost.

# 3.2 Basic Localization Model

Let $S_{l}$ and $S_{r}$ respectively denote LHCP and RHCP signals that propagate from corresponding LWAs to the target via the LoS paths. The frequencies of these two signals, $f_{l}$ and $f_{r}$, are what we desire for calculating the location. Recall that $S_{l}$ and $S_{r}$ are featured in frequency and space division multiplexing (FSDM) and orthogonal CP, so these two signals won't interfere with each other. As a result, the target can estimate its relative direction to both LWAs based on the received spectrum and the radiation pattern of the two LWAs. Further, given the locations of the two LWAs, $L_{r} = (x_{r}, y_{r}, z_{r})$ of the RHCP LWA and $L_{l} = (x_{l}, y_{l}, z_{l})$ of the LHCP LWA, the target can output its absolute location. In detail, as we mentioned in §2, the radiation pattern of the LWA is a conical
surface at a specific frequency. Therefore, the location $L_{t} = (x_{t}, y_{t})$ of the target device is the intersection point of the two conical surfaces and the horizontal plane of its height. By combining these conditions, $L_{t}$ can be estimated by solving the following equation set:

$$
L_{t} = (x_{t}, y_{t}): \quad \begin{cases} F(L_{r}, f_{r}) = 0, \\ F(L_{l}, f_{l}) = 0, \\ z = z_{t}, \end{cases} \tag{3}
$$

where $z_{t}$ is the target's height; the functions $F(L_{r}, f_{r})$ and $F(L_{l}, f_{l})$ are mathematical equations of conical surfaces with the locations of the LWAs as the vertices. These two equations indicate the propagation directions of RHCP and LHCP signals at frequencies $f_{r}$ and $f_{l}$, respectively. Taking the RHCP signal as an example, $F = F(L_{r}, f_{r})$ can be formulated as

$$
F = (x - x_{r})^{2} - \frac{(y - y_{r})^{2}}{a^{2}} - \frac{(z - z_{r})^{2}}{a^{2}}, \tag{4}
$$

where $a = \cot[\theta(f_{r})]$.

However, there are two other types of signals impacting the localization accuracy when Bifrost functions: 1) LP WiFi signal that is emitted by the WiFi AP, and then received by the target. This signal establishes data communication between the target and the AP and propagates in both the LoS path and multipath. It is also the input signal of LWAs, which will be transformed into FSDM signals by the LWAs. 2) CP multipath signal that propagates from LWAs to the target after reflection, resulting in undesired noisy signals at the target.

Thus, we should first identify the frequency of the FSDM signal from the LP WiFi signal (discussed in §3.3) and then filter out the CP multipath signal as much as possible (discussed in §3.4 and §3.5), to accurately estimate the frequencies, $f_{l}$ and $f_{r}$, and the target's location.
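To make Eqs. (3)-(4) concrete, the sketch below solves the two-cone intersection numerically. Everything in it is illustrative: the linear frequency-to-angle map is a made-up stand-in for the measured dispersion curve of Fig. 8(a), and the LWA positions, axes, and input frequencies are arbitrary example values, not the paper's deployment.

```python
import numpy as np
from scipy.optimize import fsolve

def theta_of_f(f_ghz, f_lo=5.17, f_hi=5.33, th_lo=22.0, th_hi=44.0):
    """Hypothetical linear frequency-to-angle map over the RHCP field of view
    (22 deg .. 44 deg across 5.17 .. 5.33 GHz, loosely after Fig. 8(a))."""
    t = (f_ghz - f_lo) / (f_hi - f_lo)
    return np.deg2rad(th_lo + t * (th_hi - th_lo))

def cone_residual(p, apex, axis, theta):
    """Zero when p lies on the cone with the given apex, unit axis, half-angle."""
    d = p - apex
    return np.cos(theta) ** 2 * d.dot(d) - d.dot(axis) ** 2

# Illustrative deployment: two LWAs at the same height with opposed axes.
L_r, u_r = np.array([0.0, 0.0, 1.0]), np.array([1.0, 0.0, 0.0])
L_l, u_l = np.array([6.0, 0.0, 1.0]), np.array([-1.0, 0.0, 0.0])
z_t = 1.2  # known target height; Eq. (3) fixes z = z_t

def equations(xy, f_r, f_l):
    p = np.array([xy[0], xy[1], z_t])
    return [cone_residual(p, L_r, u_r, theta_of_f(f_r)),
            cone_residual(p, L_l, u_l, theta_of_f(f_l))]

x_t, y_t = fsolve(equations, x0=[2.0, 2.0], args=(5.292, 5.227))
print(f"estimated target location: ({x_t:.2f}, {y_t:.2f}, {z_t})")  # ~(2.5, 2.0, 1.2)
```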
The former is jointly influenced by CP and LP signals, while the latter is determined by the LP signal only, leading to the following relationship: + +$$ +\begin{array}{l} \left\| H _ {o n} \left(f _ {k}\right) \right\| = \left\| H ^ {C P} \left(f _ {k}\right) + H ^ {L P} \left(f _ {k}\right) \right\|, \\ \left\| H _ {o f f} \left(f _ {k}\right) \right\| = \left\| H ^ {L P} \left(f _ {k}\right) \right\|, \tag {5} \\ \end{array} +$$ + +where $|H^{CP}f_k|$ is the amplitude of subcarriers corresponding to the CP signal, and $\| H^{LP}(f_k)\|$ is that of the LP signal. Based on these two values, we can quantify the variation of CSI caused by the CP signal: + +$$ +\begin{array}{l} \| \Delta H (f) \| = \| H ^ {C P} (f) \| _ {k} \\ = \| H ^ {C P} \left(f _ {k}\right) + H ^ {L P} \left(f _ {k}\right) \| - \| H ^ {L P} \left(f _ {k}\right) \| \tag {6} \\ = \| H _ {o n} \left(f _ {k}\right) \| - \| H _ {o f f} \left(f _ {k}\right) \| \\ \end{array} +$$ + +In order to accurately analyze this variation and mitigate the effect of occasional outliers and noise, a Z-Score normalization procedure is performed on $\Delta H(f_{\mathbb{H}})$ . We execute a preliminary screening to quickly filter out the subcarriers that are less likely corresponding to the frequencies of the CP signals. A percentage threshold $\varepsilon \in \mathbb{P}, 1$ is set to select subcarriers with a larger value of $\Delta H f_{k}$ indicating that these subcarriers undergo significant changes and are more likely to be affected by the CP signal. The value of $\varepsilon$ is chosen empirically based on the degree of multipath. Fig. 10(a) shows a high-level overview of the selected subcarriers, where LHCP and RHCP signals are highlighted in red and blue, + +![](images/bc61a0518220150b607c1d66cc7dc6947f380c2b1009e36ae95bd6ebef2eccde.jpg) +(a) Selecting frequencies of CP signals. + +![](images/b8947759e9bead7e2b64a1301febbf053eed3a0fa8aef46ad73d69ef35fc88f1.jpg) +(b) Filtering out multipath signals. + +![](images/c0ca63a466b48a65700cdca67ed5ce4af8d69cd93ba98cefd9921e9d28cfe97c.jpg) +(c) Align subcarriers. +Figure 10: Workflow of selecting correct frequencies (LHCP and RHCP are distinguished by red and blue colors). + +![](images/75fa5e29537f55972cfe2ab54947c4583cfc8202904c13b1ea132fa3d329ee66.jpg) +(d) Estimate frequencies. + +respectively. In subsequent stages, we exclusively focus on these selected subcarriers. + +# 1.4 Filtering out the Multipath Signal + +As shown in Fig. 9(a), even though we have identified the frequencies of the CP signal from the WiFi signal, there still exists the multipath signal, resulting in undesired variation in $\Delta H$ . Note that the multipath signal is mainly introduced by reflection of the CP FSDM signal. We find that subcarriers corresponding to the multipath signal can be divided into two categories: 1) Sparsely clustered subcarriers $C_s$ : FSDM signal with different frequencies and propagation directions may go through reflection at many places, but only a few of those signals reach the target with inconsecutive frequencies, resulting in many sparse clusters of subcarriers6. 2) Compactly clustered subcarriers $C_c$ : There are some FSDM signals with frequencies close to that of the LoS signal. Those FSDM signals reflect just right near the target device, which will result in a + +compact and wide cluster of subcarriers influenced by multipath and LoS signals. + +Here we first try to filter out $C_s$ . To do so, all the varied subcarriers are clustered, respectively, as Fig. 10(b) illustrates. 
Then, the following integral function will be calculated for every cluster to find the one most likely to be corresponding to the LoS signal, + +$$ +C ^ {i} = \begin{array}{c} \int f _ {k} ^ {i} \\ f _ {k _ {\mathrm {m i}}} ^ {i} \end{array} \| \Delta H (f _ {k} ^ {i}) \| d f _ {k} \tag {7} +$$ + +where $f_{k_{\min}}^{i}$ and $f_{k_{\max}}^{i}$ are the minimum and maximum frequencies of the $i$ -th cluster, respectively. The value of $C^i$ can be regarded as the area formed by the curve of $\| \Delta H(f_k^i)\|$ and the two frequencies $f_{k_{\min}}^{i},f_{k_{\max}}^{i}$ . The wider the bandwidth and higher the amplitude of a cluster are, the greater the value of its $C^i$ is. + +After that, we only retain the cluster that bears the highest $C^i$ , which is most likely to be $C_c$ and contains subcarriers corresponding to the LoS signal. However, as we mentioned before, some subcarriers in $C_c$ are also corresponding to the undesired multipath signal. Next, we are going to purify $C_c$ by narrowing down its frequency range as much as possible. + +# 1.5 Purifying the LoS Signal for Localization Denote the frequency range of $C_c$ as $k^{\prime}$ , $k_{\mathrm{max}}$ for RHCP signals + +min + +and $k_{\mathrm{mi}}^l, k_{\mathrm{max}}^l$ for LHCP signals. In both of the two ranges, we are going to find the subcarrier with the largest $\Delta H(f_k|g)$ as Fig. 10(c) illustrates. After obtaining them, we denote the index of selected subcarriers as $K^r$ and $K^l$ . Next, as Fig. 10(c) depicts, we align $K^r$ and $K^l$ , then trim the head and tail to retain the intersection of two clusters, $\| \Delta H^r (f_k) \|$ and $\| \Delta H^l (f_k) \|$ . Finally, we multiply $\| \Delta H^r (f_k) \|$ and $\| \Delta H^l (f_k) \|$ to form a weight matrix $G$ , which is illustrated in Fig. 10(d). + +$$ +\begin{array}{l} \left\| \Delta H ^ {r} \left(f _ {K ^ {r} - \delta}\right) \right\| \\ G = \quad \dots \quad \times \left\| \Delta H ^ {l} \left(f _ {K ^ {l} - \delta}\right) \right\| \dots \left\| \Delta H ^ {l} \left(f _ {K ^ {l} + \delta}\right) \right\| \tag {8} \\ \left\| \Delta H ^ {T} \left(f _ {K ^ {T} + \delta}\right) \right\| \\ \end{array} +$$ + +whenteh,ivahs theegh andhylvostnabatngthe wengthafverage + +of values in $[f_{K^{\text{r}}\delta}, f_{K^{\text{r}}\delta}]$ and $f_{K^{\text{l}}\delta}, f_{K^{\text{l}}\delta}$ , which are weighted by the corresponding values in the matrix $G$ . The purpose of this step is still to mitigate the interference of the multipath signal. After that, the estimated values of the two frequencies will be fed into Eq. (4) to output an estimation of the target's location. Note that if there are multiple WiFi links for selection, one can choose the link that results in the smallest size of $\| \Delta H(f_k)\|$ , meaning that the range of LoS signals' frequency is reduced to the minimum. + +Note that the basis of our localization algorithm is using the different CP signals to distinguish different LWAs, and the CP signals can't be replaced by the LP signals. The reason is that the LP signals may lead to high localization errors or even the breakdown of the localization system. Specifically, once the orientation of LP devices changes, polarization directions of these devices change accordingly. As such, each receiving antenna is very likely to receive FSDM signals from both LWAs and can't distinguish them. + +![](images/e14dd07adfafe5f6159b6067571cb4b5d052bc770da68484928b81b7d7546a66.jpg) +Figure 11: Hardware Settings. 
Next, we will proceed with describing the prototype implementation to gain insights on the performance of Bifrost in varied settings.

# 4 EVALUATION

We evaluate the performance of Bifrost using two low-cost PCB-based LWAs working at 5.17GHz-5.33GHz and a WiFi sensing platform called PicoScenes [38] to extract CSI. When Bifrost functions, the WiFi transceiver communicates in the same band based on the 802.11ax standard [1]. We first describe our implementation and evaluation settings in §4.1. Then, our investigation of Bifrost's performance is four-pronged: §4.2 compares Bifrost with SpotFi [43], the state-of-the-art indoor WiFi localization technique, in a real-world indoor setting and NLoS scenarios, and then shows how the localization accuracy can be improved when Bifrost aids SpotFi to function in AP-constrained scenarios; subsequently, in §4.3, we conduct an ablation study to evaluate the contribution of each sub-module of the localization algorithm; then, in §4.4, we dissect the impacting factors on localization accuracy, including multipath, transmission power, as well as the distance between LWAs and the AP; also, we evaluate the influence of deploying Bifrost on the data communication of WiFi transceivers in §4.5; finally, we summarize the evaluation in §4.6.

# 4.1 Implementation and Experimental Methodology

Hardware and Software. Our proposed LWA is shown in Fig. 11(b). The main body of our LWA is $24.2\mathrm{cm} \times 5.2\mathrm{cm}$, containing 11 single units designed to ensure most of the input signal's energy can be leaked out. One of the LWA's feed ports is connected to a LP antenna for receiving the WiFi signal while the other port is connected to a $50\Omega$ matched load to absorb the remaining energy of the signal that goes through the entire LWA structure. By switching the feed port, the polarization of the FSDM signal can be altered between LHCP and RHCP. Besides, a low-noise amplifier powered by a small rechargeable battery is utilized to boost the input signal with 0.43W power consumption. A NE555 timer IC with a load switch circuit
To receive the CP FSDM signal, we equip the target with two $3.87\mathrm{cm}\times 3.87\mathrm{cm}$ patch antennas, as Fig. 11(a) depicts. One antenna is LHCP, while the other is RHCP, and both are fixed on the antenna mount connected to COMFAST AX210 WiFi card [17] on the host computer. + +We use PicoScenes, a WiFi sensing platform, to send WiFi packets at AP with 20dBm, and extract CSI at the target. In the working band of Bifrost, PicoScenes can procure CSI data of 2025 subcarriers with indexes $[-1012, 1012]$ . We run PicoScenes on Ubuntu 20.04, + +then analyze CSI data and execute the localization algorithm on MATLAB 2022b. + +Baseline. We compare Bifrost with SpotFi, the state-of-the-art indoor WiFi localization technique, under various settings. To ensure the validity of our results, we make our best effort to re-implement SpotFi and ensure fairness through comparison. We evaluate the performance of SpotFi by deploying multiple WiFi APs strictly based on the real-world settings of WiFi APs, as Fig. 12 shows. Before each set of experiments, we use a laser rangefinder to obtain the ground-truth, including coordinates of the target device and LWAs. + +Scenarios and Deployment. We select four typical indoor scenarios for evaluation, across different sizes and different levels of multipath effect: 1) A small-size hall (6.2m×4.5m) with few multipath; 2) A long and narrow corridor (7.5m×2.1m) with few multipath; 3) A small-size meeting room (5.7m×4.9m) with rich multipath; 4) A large-size classroom (10.6m×7.1m) with rich multipath. In each scenario, two LWAs are attached to two orthogonal walls. The target device is mounted onto tripods, keeping the height constant across all experiments. + +# 4.2 Overall Performance + +In this section, we first evaluate the localization accuracy of Bifrost and SpotFi in real-world settings, where WiFi APs in experiments are deployed at the same positions as those in practice. Then we deploy Bifrost in the meeting room and classroom, where SpotFi doesn't work well, to enhance the performance of SpotFi, so as to see the accuracy improvement brought by Bifrost. + +Performance Comparison in Realistic Settings. In reality, most indoor WiFi APs are dispersively deployed at different locations and very likely separated from each other by walls so that LoS paths are usually obstructed. Thus, the target device is hard to establish more than one LoS connection with APs, according to our real-world investigation (i.e., Fig. 2). We evaluate the performance of SpotFi in these practical indoor settings, and also the localization error of Bifrost when deployed in the above-mentioned four scenarios. 50 locations are chosen in each scenario for location estimation. The evaluation results are reported in Fig. 13 (The solid blue line stands for Bifrost and the dashed red line stands for SpotFi). + +In the hall, both Bifrost and SpotFi are supposed to exhibit the best performance due to the low-level multipath effect, but the median error of SpotFi is $1.23\mathrm{m}$ , which is more than $\mathcal{Z}$ of Bifrost's $0.61\mathrm{m}$ . This is because only one decent LoS signal can be obtained at most locations due to the blockage of walls even though three APs are deployed around. As the pie chart illustrates, SpotFi outperforms Bifrost at only 9 locations. When it comes to the corridor scenario, the median error of SpotFi increases to $1.77\mathrm{m}$ because two of the three APs are situated inside rooms so that AoAs obtained by the target are heavily distorted. 
We note that the median error of Bifrost also increases to $0.76\mathrm{m}$. This slight performance degradation is mainly due to the extension of the localization range, which is further investigated in §4.4.

Next, we switch to the meeting room where more pronounced multipath exists. What's worse, there is no AP in the meeting room, making it more challenging for both approaches to function. The accuracy of the two approaches is unsurprisingly degraded, where the median error is $1.95\mathrm{m}$ in SpotFi and $0.91\mathrm{m}$ in Bifrost. Similarly, the performance of SpotFi is restrained due to the lack of the LoS signal. Bifrost exhibits acceptable performance in this tough environment and avoids escalation of errors. This can be attributed to two aspects. On one hand, Bifrost can function once the input signal has enough energy, without the need for a LoS AP. On the other hand, Bifrost exploits a delicate algorithm to tame the multipath effect. We will further discuss issues of multipath and NLoS in §4.4. In this scenario, SpotFi doesn't outperform Bifrost at any location.

![](images/20c13b4e9eca0eb9f70a5bb01dab7d7b75d52113724c5af0424c18db95b25e49.jpg)
(a) Hall

![](images/75fa902b814efe455e4e687ee56b920319ee15e0c247276fed013941dd966a6b.jpg)
(b) Corridor

![](images/13b58135f6e1b371566e5ed5acca1a3ad836d23cf639ccd43f0c1935dc9868e1.jpg)
(c) Meeting room

![](images/921fa998df71dbda1001c882766862254b51e8730c8684b124dbf457256bc250.jpg)
(d) Classroom
Figure 13: Overall performance of Bifrost and SpotFi across different scenarios (the pie charts show at how many locations each method achieves the lower error).

![](images/60f56b36363b5e62b664b4fda9fd7d15b212e2b11c0e085fa5105f820f27e554.jpg)
(a) The NLoS AP outdoors.

![](images/6bb200cf265600ee644a5c41e6e2c4fcc79f9f4fe3811f9f401168875b39ae3c.jpg)
(b) The NLoS AP indoors.
Figure 14: Deployment of the NLoS settings.

Finally, we set SpotFi and Bifrost in the large-size classroom with rich multipath. With a LoS AP, the median error of SpotFi is reduced to $1.87\mathrm{m}$, which is better than that in the meeting room with no LoS AP. By contrast, the median error of Bifrost increases to $1.20\mathrm{m}$, mainly due to a longer distance between LWAs and WiFi APs and more multipath.

Across all experiments in the four scenarios, the median error of Bifrost is $0.81\mathrm{m}$, which is $52.35\%$ less than that of SpotFi (i.e., $1.70\mathrm{m}$). Bifrost outperforms SpotFi at most locations, except those where the target can obtain 3 LoS signals from 3 APs. However, as shown in Fig. 13, the chance for SpotFi to achieve better performance is less than $7\%$.

Performance Comparison in NLoS Scenarios. Then we conduct two groups of experiments to demonstrate Bifrost's ability of localization in NLoS scenarios and compare its performance with that of SpotFi.

In the first group of experiments, we deploy the localized target and the LWAs in a hall. As Bifrost only uses one AP to function, we evaluate the performance of Bifrost when this AP is inside and outside the hall (i.e., LoS and NLoS scenarios). The results in Fig. 15 show that the median errors of Bifrost are $0.61\mathrm{m}$ in LoS and $0.73\mathrm{m}$ in NLoS, respectively. Meanwhile, in the same hall, we also evaluate the performance of SpotFi in LoS and NLoS scenarios, respectively. In the LoS scenario, 3 APs are deployed in the hall and can establish LoS connections with the target. In the NLoS scenario, as Fig.
14(a) shows, one of the APs (i.e., AP1) is outside the room, while the other 2 APs (i.e., AP2 and AP3) can connect with the target along the LoS paths. We find that the median error of SpotFi increases from $0.45\mathrm{m}$ in LoS to $1.15\mathrm{m}$ in NLoS. The error may further go beyond $1.6\mathrm{m}$ if only one AP is left in LoS, as reported in [43].

In the second group of experiments, we compare the performance of Bifrost and SpotFi using a different NLoS setting. As Fig. 14(b) shows, we deploy the localized target, the LWAs, and three APs in the same hall. One of the three WiFi APs (i.e., AP1) is deliberately deployed around the corner and surrounded by multiple chairs, so it can't establish LoS connections with the target or the LWAs, while the other 2 APs (i.e., AP2 and AP3) can connect with the target along the LoS paths. SpotFi uses all 3 APs to localize the target, and its median error is $1.21\mathrm{m}$. Bifrost only uses the AP in NLoS (i.e., AP1) to function, and its median error is $0.69\mathrm{m}$, which is $42.98\%$ less than that of SpotFi.

These two groups of experiments demonstrate that Bifrost provides relatively stable performance whether the WiFi AP is in a LoS or NLoS scenario. In NLoS scenarios, Bifrost achieves much more accurate performance than SpotFi.

Performance Enhancement when Bifrost Aids SpotFi. Next, we deploy Bifrost where SpotFi shows poor accuracy to see if Bifrost can aid SpotFi to improve localization accuracy. In practice, it is impossible to deploy Bifrost everywhere, so we choose the meeting room and the classroom, where localization accuracy is heavily affected by constrained APs and SpotFi reports the worst results. Specifically, when the target gets into these two scenarios, its location will be reported by Bifrost. Otherwise, the target keeps using SpotFi for indoor localization.

As shown in Fig. 16, the median localization error is $1.13\mathrm{m}$ when Bifrost aids SpotFi, achieving a $33.54\%$ error reduction compared with SpotFi operating independently in all scenarios. This indicates that Bifrost can not only work independently, but also enhance the localization accuracy of existing localization techniques.

![](images/352097d2f11c8cdaf6dd1ceacd68ed9b900106b469d063d16f21faf358a35f.jpg)
Figure 15: Performance of Bifrost and SpotFi in the NLoS scenario.

![](images/8440172285ba16eaeb2c520a87c7567b6450151cb2e9e25468a2c653a068c295.jpg)
Figure 16: Performance enhancement brought by Bifrost.

![](images/67c7cfcad83ea3c2c70ebae1a6e28f6ac8a004921890e9bd31e0f8baa85750ab.jpg)
Figure 17: Ablation study on the localization algorithm.

![](images/46dc08a5fc2398f6e3fd535ac2a3b8f880e1ed26ef679898911b6677925990eb.jpg)
Figure 18: Impact of the multipath effect.

# 4.3 Ablation Study

There are three crucial sub-modules in Bifrost's localization algorithm, that is, identifying the frequencies of CP signals (module 1, presented in §3.3), filtering out the multipath signal (module 2, presented in §3.4), and purifying the LoS signal for localization (module 3, presented in §3.5). We conduct an ablation study to evaluate the contribution of each sub-module to localization accuracy. The evaluation is conducted under four settings, S1: without any sub-module, S2: only with module 1, S3: with modules 1 and 2, and S4: with all three modules.

Fig. 17 reports the results of this ablation study. If we do nothing and directly extract frequencies from raw amplitude data of CSI, the median localization error will surge to $3.31\mathrm{m}$ (S1).
# 4.3 Ablation Study + +There are three crucial sub-modules in Bifrost's localization algorithm, namely identifying the frequencies of CP signals (module 1, presented in §3.3), filtering out the multipath signal (module 2, presented in §3.4), and purifying the LoS signal for localization (module 3, presented in §3.5). We conduct an ablation study to evaluate the contribution of each sub-module to localization accuracy. The evaluation is conducted under four settings, S1: without any sub-module, S2: only with module 1, S3: with modules 1 and 2, and S4: with all three modules. + +Fig. 17 reports the results of this ablation study. If we do nothing and directly extract frequencies from the raw amplitude data of CSI, the median localization error surges to $3.31\mathrm{m}$ (S1). Once the LP WiFi signal is filtered out, however, the frequencies of CP signals are highlighted, which results in a median localization error of $1.51\mathrm{m}$ (S2). Further, the results of S3 and S4 show that the median error is reduced to around $0.93\mathrm{m}$ and $0.81\mathrm{m}$, respectively, once we filter out the multipath signal and purify the LoS signal. These results show the necessity and contribution of each module in our design.
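+To make the four settings concrete, the algorithm can be read as three optional stages applied in order. A minimal skeleton follows; the stage bodies are placeholders, since the actual signal processing lives in §3.3-§3.5:
+
+```python
+# Sketch of the ablation settings S1-S4: each setting enables a prefix
+# of the three sub-modules. Stage implementations are stubs here.
+def filter_lp_wifi(sig):   return sig  # module 1: expose CP-signal frequencies
+def filter_multipath(sig): return sig  # module 2: filter out multipath signals
+def purify_los(sig):       return sig  # module 3: purify the LoS signal
+
+def localize(csi, use_m1=False, use_m2=False, use_m3=False):
+    sig = csi
+    if use_m1: sig = filter_lp_wifi(sig)
+    if use_m2: sig = filter_multipath(sig)
+    if use_m3: sig = purify_los(sig)
+    return sig  # stands in for the final AoA/location estimate
+
+SETTINGS = {                                           # reported median error:
+    "S1": dict(),                                      # 3.31 m
+    "S2": dict(use_m1=True),                           # 1.51 m
+    "S3": dict(use_m1=True, use_m2=True),              # 0.93 m
+    "S4": dict(use_m1=True, use_m2=True, use_m3=True), # 0.81 m
+}
+```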
# 4.4 Impacting Factors + +Next, we analyze the impact of three factors on the performance of Bifrost: multipath in the environment, the transmission power, and the distance between the LWAs and the WiFi AP. + +Multipath. We examine the AoA estimation accuracy of Bifrost in multipath scenarios. We fix the positions of the LWAs and the target, then change the number of indoor objects (i.e., chairs and desks) to create different degrees of multipath. Specifically, two desks are first set in the room to emulate a light multipath environment, and ten chairs are then added to produce richer signal reflections. The results in Fig. 18 indicate that the AoA estimation accuracy degrades as the multipath is intensified: the median angle error initially sits around $3.8^{\circ}$ and then increases to around $6.7^{\circ}$. The more multipath exists, the more sparsely clustered subcarriers $C_s$ are formed. When these clusters are stacked with each other to form a wider cluster, there is a certain chance for our algorithm to identify the wrong signal as the LoS signal, causing greater errors in AoA estimation. + +We also note that Bifrost maintains relatively stable performance across different polarizations. The difference between the median errors of the LHCP and RHCP signals is less than $0.3^{\circ}$, which underscores the robustness of our proposed LWA and localization algorithm. + +Transmission Power. The default transmission power of the AP is 20dBm in the above evaluations, and we now vary this value to investigate its impact on localization performance. Moreover, as mentioned before, we can't always guarantee that the WiFi AP establishes a LoS path with the LWAs, so we also compare the AP in LoS and NLoS scenarios under each transmission power setting. We place the AP at a 2m distance outside the door and the target 2m inside the door, switching between the LoS and NLoS scenarios by opening and closing the door. Results in Fig. 19 show that decreasing the transmission power leads to an increase in the localization error, regardless of whether the AP is in LoS or NLoS. Besides, the errors in the LoS scenario are always lower than those in NLoS for the same transmission power. These findings confirm the negative impact that NLoS can have on localization performance. + +However, we also observe that as the transmission power increases, the impact of NLoS on the performance of Bifrost decreases, albeit gradually. Notably, when the transmission power is set to $20\mathrm{dBm}$, the median errors are $0.61\mathrm{m}$ and $0.73\mathrm{m}$ in the LoS and NLoS scenarios, respectively. In practical scenarios, this performance is sufficient to meet the requirements of most location-based applications. + +Distance between AP and LWAs. The performance of Bifrost may be influenced by the energy of the input WiFi signal fed into the LWAs, because it determines the SNR (signal-to-noise ratio) of the FSDM signal. The energy of the input WiFi signal is mainly related to two factors, namely the transmission power and the distance between the AP and the LWAs. Since the former was discussed above, we here probe into the impact of distance. We carry out the experiments along the corridor, removing reflectors as far as possible, and set the distance to $2.5\mathrm{m}$, $5\mathrm{m}$, $7.5\mathrm{m}$, and $10\mathrm{m}$. Results in Fig. 20 demonstrate that the localization error increases with distance and may even produce outliers. The median errors are $0.63\mathrm{m}$, $0.65\mathrm{m}$, and $0.93\mathrm{m}$ in the first three groups of experiments, all below $1\mathrm{m}$, yet the error spikes to $1.49\mathrm{m}$ at the $10\mathrm{m}$ distance. Despite this, a range of $7.5\mathrm{m}$ is sufficient to cover most rooms in a typical building, thus ensuring the feasibility of Bifrost's function. + +![](images/16a38231d650bb86b5ff8ee17331187aa7c7d79c58ccc45b049d167c2786c7dd.jpg) +Figure 19: Impact of the transmission power. + +![](images/2ce18e1841f8f48f59c2e0552990819ca20516f63df55589e295f5240d1087a7.jpg) +Figure 20: Impact of the distance between AP and LWAs. + +![](images/5f2a96e0c1af772f0330b928316bb4518b1496e40fd924527949102072aa6cf5.jpg) +Figure 21: Impact on the AP and the target of Bifrost. + +![](images/41240c86f3dc7ef92bd433d9ea054df14aee43845c5886815c7a746fcfd18272.jpg) +Figure 22: Impact on other WiFi connections. + +# 4.5 Impact on Communication + +In this section, we evaluate the impact of deploying Bifrost on WiFi connections, including the connection between the AP and the target as well as other connections. Firstly, we control the AP to transmit 1000 packets at a $50~\mathrm{ms}$ interval and record the packet loss rate in each group of experiments. The results in Fig. 21 show that the median packet loss rates are $3.92\%$ and $3.71\%$ when the LWA is on and off, respectively. This $0.2\%$ difference implies that the operation of Bifrost has a negligible influence on the AP-target communication. + +Secondly, we place Bifrost's transceiver in an intersection region covered by two commercial APs (AP1 in a classroom and AP2 in a laboratory) with good signal quality. We then use different off-the-shelf smartphones to establish WiFi connections with these APs and record the variation in throughput over 2 hours for each connection (C1: OnePlus 9-AP1, C2: iPhone 13-AP2, C3: OnePlus 9-AP1, and C4: iPhone 13-AP2). The results are shown in Fig. 22. We find that the median throughput degrades by $2.7\%$ and $0.4\%$ in C1 and C3, respectively, which has nearly no impact on network quality or user experience. Interestingly, the throughput increases when the LWAs are turned on for C2 and C4. We attribute this increase to statistical error mainly caused by changes in network quality and wireless channels.
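+The packet-loss measurement above counts lost packets out of 1000 sent at a fixed 50 ms interval; the sketch below mirrors that counting method with plain UDP sockets. It is an illustration only: the address and port are assumptions, and the actual experiment uses the AP's own traffic rather than this script.
+
+```python
+import socket
+import time
+
+N, INTERVAL = 1000, 0.05       # 1000 packets at a 50 ms interval (as in §4.5)
+ADDR = ("192.168.1.50", 9999)  # illustrative receiver address, not from the paper
+
+def send_probes():
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    for seq in range(N):
+        s.sendto(seq.to_bytes(4, "big"), ADDR)  # sequence-numbered probe
+        time.sleep(INTERVAL)
+
+def receive_and_report(timeout=5.0):
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    s.bind(("", ADDR[1]))
+    s.settimeout(timeout)
+    seen = set()
+    try:
+        while True:
+            data, _ = s.recvfrom(16)
+            seen.add(int.from_bytes(data[:4], "big"))
+    except socket.timeout:
+        pass  # sender finished; unseen sequence numbers were lost
+    print(f"packet loss rate: {1 - len(seen) / N:.2%}")
+```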
# 4.6 Summary of Evaluation + +Based on the above evaluations of Bifrost, the following summary can be drawn: + +1) The median localization error of Bifrost is $0.81\mathrm{m}$, which is $52.35\%$ less than that of SpotFi in arguably realistic indoor settings. +2) Bifrost can be deployed in scenarios without enough APs to help SpotFi enhance its performance, reducing the overall localization error of SpotFi by $33.54\%$. +3) The distance between the LWAs and the APs, multipath, and the transmission power influence Bifrost's performance differently, yet the absolute accuracy never degrades drastically. +4) The deployment of Bifrost has a negligible impact on the communication quality of either the link between the AP and the target or other WiFi connections. + +# 5 DISCUSSION + +In this section, we discuss practical issues concerning the applicability and efficacy of Bifrost. + +Complexity of Deployment. Deploying Bifrost is easy and straightforward and takes two steps: stick the LWAs to the wall, and measure the LWAs' coordinates. Compared with most existing indoor localization methods, Bifrost works in a plug-and-play manner, requiring neither complex configurations nor additional operations on the APs or the target. + +FoV and Coverage of LWAs. Bifrost achieves a $22^{\circ}$ FoV in the current prototype by using 160MHz of bandwidth (5.17GHz - 5.33GHz). The FoV and coverage can be expanded by using the entire WiFi band, including frequencies at 2.4GHz, 5.2GHz, and 5.8GHz [47]. This expansion is feasible because most existing WiFi devices already support dual- or tri-band functionality. + +Applicability. Considering that most current commercial WiFi devices are equipped with LP antennas, they may not be compatible with Bifrost yet. There are two potential solutions to enhance the applicability of Bifrost. On one hand, some commercial off-the-shelf CP antennas (e.g., the CP flat patch antennas [45] from L-com, Inc.) are designed to be integrated with existing WiFi APs; Bifrost can be deployed on such devices. On the other hand, in future work, we will study how to utilize LP rather than CP signals to improve the applicability of Bifrost. To distinguish LWAs using LP signals, different phase shifts or OOK patterns may be exploited. + +Besides, indoor obstacles may also influence the applicability of Bifrost, because the localization performance degrades if the LoS paths between the LWAs and the target are blocked. Therefore, one may select proper positions to deploy the LWAs so as to avoid NLoS propagation to the target to be localized. However, a LoS path between the LWAs and the WiFi AP isn't a precondition: as long as the LWAs can receive the signal from the WiFi AP, Bifrost can work. + +Lifetime and Maintenance Cost. The rated current of an LWA is $0.86\mathrm{mA}$. An LWA is powered by a 1600mAh battery and works at a $20\%$ duty cycle, so the estimated lifetime of an LWA is over 9302 hours ( $\approx$ 387 days), and maintenance amounts to recharging the battery once every 387 days.
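+The lifetime figure is simple arithmetic over the numbers just given:
+
+```python
+# LWA lifetime: 1600 mAh battery, 0.86 mA rated current, 20% duty cycle,
+# i.e., an average draw of 0.86 * 0.2 = 0.172 mA.
+capacity_mah, current_ma, duty_cycle = 1600, 0.86, 0.20
+hours = capacity_mah / (current_ma * duty_cycle)
+print(f"{hours:.0f} hours = {hours / 24:.1f} days")  # 9302 hours = 387.6 days
+```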
Potential Interference. One may be concerned that, if multiple LWAs are deployed close to each other, LWAs with the same polarization will interfere with each other. However, each room has only one RHCP LWA and one LHCP LWA in the setting of Bifrost, so LWAs with the same polarization are separated by walls. Interference signals must propagate through the wall, after which they retain only low strength. Therefore, different pairs of LWAs hardly interfere with each other. + +# 6 RELATED WORK + +In this section, we briefly summarize existing works in the fields related to our work. + +# 6.1 Application of LWA + +The work closest to ours is 123-LOC [42], which presents a THz LWA with two perpendicular slots to radiate horizontally and vertically polarized FSDM signals. Range and angle estimation is then performed by the receiver based on the bandwidth and frequencies of the received signals. In comparison, Bifrost reduces the impact of multipath and achieves room-scale localization, which is a challenging task for THz signals. + +LeakyTrack [21] tracks an object between two LWAs based on the observation that nodal and environmental motion changes the received spectral profile of FSDM signals. [76] investigates the security of THz networks with LWAs and shows that FSDM signals of the LWA can hinder eavesdroppers, e.g., by using a wide-band transmission. [20] and [22] study single-shot link discovery with the help of FSDM signals from the LWA: a receiver can discover the direction of the path from the transmitter in one shot. In contrast to those works, which require a specific feeding device for the THz LWA, Bifrost operates in the WiFi band and works in a plug-and-play manner, providing better applicability and convenience. Additionally, Bifrost addresses the relevant challenges, including multipath, noise, and ambiguity, through the careful design of its hardware and localization algorithm. + +# 6.2 WiFi-based Indoor Localization + +There have been numerous efforts on indoor localization with WiFi [16, 49, 61, 68-70, 84]. Traditional fingerprint-based techniques have been widely used, mapping the RSS readings from multiple APs to locations [46, 66]. Techniques based on AoA and ToF have become more prevalent recently. For example, ArrayTrack [69] proposes an AoA-based WiFi localization system that incorporates multiple APs and the Multiple Signal Classification (MUSIC) algorithm. SpotFi [43] proposes a MUSIC algorithm to obtain AoA and ToF simultaneously. The $M^3$ system [16] reduces the number of APs to only one by utilizing multipath signals and frequency hopping among multiple channels. + +Despite such inspiring advances, the existing proposals may disrupt the communication link between the target and the AP when the target hops between different APs or channels. In contrast, Bifrost does not interfere with the communication link; it supplements the APs' localization ability without compromising their communication ability. + +# 6.3 Polarization of the Wireless Signal + +LLAMA [15] designs a metasurface to mitigate polarization mismatch by rotating the polarization of wireless signals, which is achieved by applying a bias voltage to the orthogonal components (like $\overrightarrow{E_x}$ and $\overrightarrow{E_y}$ shown in Fig. 4) of the input signals. RoS [55] and mmTag [51] propose well-designed Van Atta arrays. They all change the polarization of input mmWave signals to the orthogonal one to deal with the self-interference between the incoming signals and the backscattered signals. IntuWition [77] observes that different materials reflect and scatter incoming polarized signals in different ways, and exploits this to classify various materials. SiWa [83] utilizes a similar principle to inspect wall structures without undermining their structural integrity. + +The above-mentioned works mainly focus on mutable LP signals. Bifrost instead explores the use of orthogonal CP signals, providing more robust performance. + +# 6.4 Backscatter-aided Localization + +The backscatter technology [8, 26, 29-31, 37, 53, 55, 58] enables many novel applications, one of which is localization. Both Hawkeye [8] and Millimetro [58] design backscatter tags based on Van Atta arrays to enhance the energy of backscatter signals, so they can localize tags at long range (over $100\mathrm{m}$ ). By assigning unique OOK modulation frequencies to different tags, these two works can also identify and localize tags simultaneously.
Moreover, RFID technology [34-36, 39-41] has been widely used in localization tasks. As a typical backscatter technology, RFID can modulate information via RFID tags. The RFID reader can then usually infer the range or orientation of the tags by analyzing the phase variation of the backscatter signals. + +Compared to those works, Bifrost utilizes tags (i.e., LWAs) to create FSDM signals to localize another target, rather than the tags themselves. + +# 7 CONCLUSION + +This paper introduces Bifrost, a low-cost and plug-and-play technique to enhance the availability and accuracy of WiFi localization. It can either aid existing techniques to improve their performance, or operate independently to outperform the state of the art in arguably realistic indoor settings, without affecting ongoing data communication of WiFi networks. What sets Bifrost apart from other solutions is its exploration of the polarization of wireless signals and the dispersion property of LWAs, which embodies the concept of RF computing [15, 29, 53, 55]. We plan to explore the research space further in this direction. + +# ACKNOWLEDGMENTS + +We thank our anonymous shepherd and reviewers for their insightful comments. This work is partially supported by the National Natural Science Foundation of China under grant No. U21B2007, and the Guoqiang Institute of Tsinghua University under grant No. 2021GQG1002. + +# REFERENCES + +[1] 2021. IEEE Standard for Information Technology-Telecommunications and Information Exchange between Systems Local and Metropolitan Area Networks-Specific Requirements Part 11: Wireless LAN Medium Access Control (MAC) and Physical Layer (PHY) Specifications Amendment 1: Enhancements for High-Efficiency WLAN. IEEE Std 802.11ax-2021 (Amendment to IEEE Std 802.11-2020) (2021). +[2] Afaz Uddin Ahmed, Reza Arablouei, Frank De Hoog, Branislav Kusy, and Raja Jurdak. 2019. Multi-radio Data Fusion for Indoor Localization Using Bluetooth and WiFi. In Proceedings of the 9th International Conference on Pervasive and Embedded Computing and Communication Systems: Volume 1: PECC. +[3] Amazon. 2023. Amazon NETGEAR 4-Stream WiFi 6 Router. https://www.amazon.com/NETGEAR-4-Stream-WiFi-Router-R6700AX/dp/B08KTXG8Q5/ref=sr_1_5?keywords=wifi+router&qid=1687784198&sr=8-5. (2023). Accessed: 2023-06-26. +[4] Amazon. 2023. Amazon Tenda AC1200 Smart WiFi Router. https://www.amazon.com/Tenda-Wireless-Internet-MU-MIMO-AC6/dp/B06X1CHFJ5/ref=sr_1_51?keywords=wifi+router&qid=1687784310&sr=8-51. (2023). Accessed: 2023-06-26. +[5] Amazon. 2023. Amazon TP-Link AC1200 WiFi Router. https://www.amazon.com/TP-Link-AC1200-Router-Archer-A54/dp/B09G5Y1HWZ/ref=sr_1_1?keywords=wifi+router&qid=1687784198&sr=8-1. (2023). Accessed: 2023-06-26. +[6] Amazon. 2023. Amazon TP-Link Smart WiFi 6 Router. https://www.amazon.com/TP-Link-Wireless-AX1500-Wifi-Router/dp/B07ZSDR49S/ref=sr_1_3?keywords=wifi+router&qid=1687784198&sr=8-3. (2023). Accessed: 2023-06-26. +[7] Roshan Ayyalasomayajula, Aditya Arun, Chenfeng Wu, Sanatan Sharma, Abhishek Rajkumar Sethi, Deepak Vasisht, and Dinesh Bharadia. 2020. Deep Learning Based Wireless Localization for Indoor Navigation. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom). +[8] Kang Min Bae, Hankyeol Moon, Sung-Min Sohn, and Song Min Kim. 2023. Hawkeye: Hectometer-range Subcentimeter Localization for Large-scale mmWave Backscatter. In Proceedings of the 21st Annual International Conference on Mobile Systems, Applications and Services (MobiSys).
+[9] Atul Bansal, Akshay Gadre, Vaibhav Singh, Anthony Rowe, Bob Iannucci, and Swarun Kumar. 2021. Owll: Accurate LoRa Localization Using the TV Whitespaces. In Proceedings of the 20th International Conference on Information Processing in Sensor Networks (IPSN). +[10] Yuanxi Cao and Sen Yan. 2021. A Low-profile High-gain Multi-beam Antenna based on 3D-printed Cylindrical Luneburg Lens. Microwave and Optical Technology Letters 63, 7 (2021). +[11] Yuanxi Cao and Sen Yan. 2021. Multi-beam SIW Leaky-wave Antenna with 2-D Beam Scanning Capability for Millimeter-wave Radar Applications. International Journal of RF and Microwave Computer-aided Engineering 31, 5 (2021). +[12] Yuanxi Cao, Sen Yan, and Juan Chen. 2023. An SIW Pillbox-based Compact Dual-polarized Multibeam Antenna with Passive 2-D Beam Scanning Capability. IEEE Transactions on Circuits and Systems II: Express Briefs 70, 1 (2023). +[13] Yuanxi Cao, Sen Yan, Wendong Liu, and Jianxing Li. 2023. A Wideband Multibeam Pillbox Antenna Based on Differentially Fed Leaky-wave Array. IEEE Antennas and Wireless Propagation Letters 22, 3 (2023). +[14] Roberto Carvalho, Shan-Ho Yang, Yao-Hua Ho, and Ling-Jyh Chen. [n.d.]. Indoor Localization Using FM and DVB-T Signals. In Proceedings of the 2016 13th IEEE Annual Consumer Communications & Networking Conference (CCNC). +[15] Lili Chen, Wenjun Hu, Kyle Jamieson, Xiaojiang Chen, Dingyi Fang, and Jeremy Gummeson. [n. d.]. Pushing the Physical Limits of IoT Devices with Programmable Metasurfaces. In Proceedings of the 18th USENIX Symposium on Networked Systems Design and Implementation (NSDI). +[16] Zhe Chen, Guorong Zhu, Sulei Wang, Yuedong Xu, Jie Xiong, Jin Zhao, Jun Luo, and Xin Wang. 2019. $M^3$ : Multipath Assisted Wi-Fi Localization with a Single Access Point. IEEE Transactions on Mobile Computing 20, 2 (2019). +[17] COMFAST. 2023. CF-AX210 PRO. http://www.comfast.com.cn/index.php?m=content&c=index&a=show&catid=13&id=123. (2023). Accessed: 2023-03-17. +[18] Pei Du and Nirupama Bulusu. 2021. An Automated AR-based Annotation Tool for Indoor Navigation for Visually Impaired People. In Proceedings of the 23rd International ACM SIGACCESS Conference on Computers and Accessibility. +[19] Pei Du and Nirupama Bulusu. 2022. Indoor Navigation for Visually Impaired People with Vertex Colored Graphs. In Proceedings of the 20th Annual International Conference on Mobile Systems, Applications and Services (MobiSys). +[20] Yasaman Ghasempour, Rabi Shrestha, Aaron Charous, Edward Knightly, and Daniel M Mittleman. 2020. Single-shot Link Discovery for Terahertz Wireless Networks. Nature Communications 11, 1 (2020). +[21] Yasaman Ghasempour, Chia-Yi Yeh, Rabi Shrestha, Yasith Amarasinghe, Daniel Mittleman, and Edward W. Knightly. 2020. LeakyTrack: Non-coherent Single-antenna Nodal and Environmental Mobility Tracking with a Leaky-wave Antenna. In Proceedings of the 18th Conference on Embedded Networked Sensor Systems + +(SenSys). +[22] Yasaman Ghasempour, Chia-Yi Yeh, Rabi Shrestha, Daniel Mittleman, and Edward Knightly. 2020. Single Shot Single Antenna Path Discovery in THz Networks. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom). +[23] Jon Gjengset, Jie Xiong, Graeme McPhillips, and Kyle Jamieson. 2014. Accurate Indoor Localization with Zero Start-up Cost. In Proceedings of the 20th Annual International Conference on Mobile Computing and Networking (MobiCom). +[24] Jon Gjengset, Jie Xiong, Graeme McPhillips, and Kyle Jamieson. 2014. 
Phaser: Enabling Phased Array Signal Processing on Commodity WiFi Access Points. In Proceedings of the 20th Annual International Conference on Mobile Computing and Networking (MobiCom). +[25] Baoshen Guo, Weijian Zuo, Shuai Wang, Wenjun Lyu, Zhiqing Hong, Yi Ding, Tian He, and Desheng Zhang. 2022. Wepos: Weak-supervised Indoor Positioning with Unlabeled WiFi for On-demand Delivery. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 6, 2 (2022), 1-25. +[26] Xiuzhen Guo, Yuan He, Zihao Yu, Jiacheng Zhang, Yunhao Liu, and Longfei Shangguan. 2022. RF-transformer: A Unified Backscatter Radio Hardware Abstraction. In Proceedings of the 28th Annual International Conference on Mobile Computing And Networking (MobiCom). +[27] Xiuzhen Guo, Yuan He, Xiaolong Zheng, Liangcheng Yu, and Omprakash Gnawali. 2018. ZigFi: Harnessing Channel State Information for Cross-Technology Communication. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM). +[28] Xiuzhen Guo, Yuan He, Xiaolong Zheng, Liangcheng Yu, and Omprakash Gnawali. 2020. ZigFi: Harnessing Channel State Information for Cross-Technology Communication. IEEE/ACM Transactions on Networking 28, 1 (2020), 301–311. +[29] Xiuzhen Guo, Longfei Shangguan, Yuan He, Nan Jing, Jiacheng Zhang, Haotian Jiang, and Yunhao Liu. 2022. Saiyan: Design and Implementation of a Low-power Demodulator for LoRa Backscatter Systems. In Proceedings of the 19th USENIX Symposium on Networked Systems Design and Implementation (NSDI). +[30] Xiuzhen Guo, Longfei Shangguan, Yuan He, Jia Zhang, Haotian Jiang, Awais Ahmad Siddiqi, and Yunhao Liu. 2020. Aloba: Rethinking ON-OFF Keying Modulation for Ambient LoRa Backscatter. In Proceedings of the 18th Conference on Embedded Networked Sensor Systems (SenSys). +[31] Xiuzhen Guo, Longfei Shangguan, Yuan He, Jia Zhang, Haotian Jiang, Awais Ahmad Siddiqi, and Yunhao Liu. 2021. Efficient Ambient LoRa Backscatter with On-Off Keying Modulation. IEEE/ACM Transactions on Networking 30, 2 (2021), 641-654. +[32] Yuan He, Weiguo Wang, Luca Mottola, Shuai Li, Yimiao Sun, Jinming Li, Hua Jing, Ting Wang, and Yulei Wang. 2023. Acoustic Localization System for Precise Drone Landing. IEEE Transactions on Mobile Computing (2023). +[33] David R Jackson, Christophe Caloz, and Tatsuo Itoh. 2012. Leaky-wave Antennas. Proc. IEEE 100, 7 (2012). +[34] Chengkun Jiang, Yuan He, Songzhen Yang, Junchen Guo, and Yunhao Liu. 2019. 3D-OmniTrack: 3D Tracking with COTS RFID Systems. In Proceedings of the 18th International Conference on Information Processing in Sensor Networks (IPSN). +[35] Chengkun Jiang, Yuan He, Xiaolong Zheng, and Yunhao Liu. 2018. Orientation-aware RFID Tracking with Centimeter-level Accuracy. In Proceedings of the 17th International Conference on Information Processing in Sensor Networks (IPSN). +[36] Chengkun Jiang, Yuan He, Xiaolong Zheng, and Yunhao Liu. 2019. OmniTrack: Orientation-aware RFID Tracking with Centimeter-level Accuracy. IEEE Transactions on Mobile Computing 20, 2 (2019), 634-646. +[37] Haotian Jiang, Jiacheng Zhang, Xiuzhen Guo, and Yuan He. 2021. Sense Me on the Ride: Accurate Mobile Sensing Over a LoRa Backscatter Channel. In Proceedings of the 19th ACM Conference on Embedded Networked Sensor Systems (SenSys). +[38] Zhiping Jiang, Tom H. Luan, Xincheng Ren, Dongtao Lv, Han Hao, Jing Wang, Kun Zhao, Wei Xi, Yueshen Xu, and Rui Li. 2022. Eliminating the Barriers: Demystifying Wi-Fi Baseband Design and Introducing the PicoScenes Wi-Fi Sensing Platform. 
IEEE Internet of Things Journal 9, 6 (2022). +[39] Meng Jin, Yuan He, Songzhen Yang, Yunhao Liu, Li Yan, and Yuji Sun. 2022. Versatile RFID-based Sensing: Model, Algorithm, and Applications. IEEE Transactions on Mobile Computing (2022). +[40] Meng Jin, Kexin Li, Xiaohua Tian, Xinbing Wang, and Chenghu Zhou. 2023. Fast, Fine-grained, and Robust Grouping of RFIDs. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking (MobiCom). +[41] Meng Jin, Shun Yao, Kexin Li, Xiaohua Tian, Xinbing Wang, Chenghu Zhou, and Xinde Cao. 2022. A Passive Eye-in-Hand "Camera" for Miniature Robots. In Proceedings of the 20th ACM Conference on Embedded Networked Sensor Systems (SenSys). +[42] Atsutse Kludze, Rabi Shrestha, Chowdhury Miftah, Edward Knightly, Daniel Mittleman, and Yasaman Ghasempour. 2022. Quasi-optical 3D Localization Using Asymmetric Signatures above 100 GHz. In Proceedings of the 28th Annual International Conference on Mobile Computing And Networking (MobiCom). +[43] Manikanta Kotaru, Kiran Joshi, Dinesh Bharadia, and Sachin Katti. 2015. SpotFi: Decimeter Level Localization Using WiFi. In Proceedings of the 2015 ACM Conference on Special Interest Group on Data Communication (SIGCOMM). + +[44] Vikram Kumar, Reza Arablouei, Raja Jurdak, Branislav Kusy, and Neil W Bergmann. 2017. RSSI-based Self-localization with Perturbed Anchor Positions. In Proceedings of the 2017 IEEE 28th Annual International Symposium on Personal, Indoor, and Mobile Radio Communications (PIMRC). +[45] L-com. 2023. Circular Polarized Patch Antenna. https://www.l-com.com/wireless-antenna-24-ghz-8-dbi-circular-polarized-rh-flat-patch-antennas. (2023). Accessed: 2023-10-03. +[46] Danyang Li, Jingao Xu, Zheng Yang, Chenshu Wu, Jianbo Li, and Nicholas D Lane. 2021. Wireless Localization with Spatial-temporal Robust Fingerprints. ACM Transactions on Sensor Networks 18, 1 (2021), 1-23. +[47] Tianxiang Li, Haofan Lu, Reza Rezvani, Ali Abedi, and Omid Abari. 2022. Bringing WiFi Localization to Any WiFi Devices. In Proceedings of the 21st ACM Workshop on Hot Topics in Networks (HotNets). +[48] Tianxiang Li, Mohammad Hossein Mazaheri, and Omid Abari. 2022. 5G in the Sky: The Future of High-speed Internet via Unmanned Aerial Vehicles. In Proceedings of the 23rd Annual International Workshop on Mobile Computing Systems and Applications. +[49] Xiang Li, Daqing Zhang, Qin Lv, Jie Xiong, Shengjie Li, Yue Zhang, and Hong Mei. 2017. IndoTrack: Device-Free Indoor Human Tracking with Commodity Wi-Fi. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 3 (2017). +[50] Bo Liang, Purui Wang, Renjie Zhao, Heyu Guo, Pengyu Zhang, Junchen Guo, Shunmin Zhu, Hongqiang Harry Liu, Xinyu Zhang, and Chenren Xu. 2023. RF-Chord: Towards Deployable RFID Localization System for Logistic Networks. In Proceedings of the 20th USENIX Symposium on Networked Systems Design and Implementation (NSDI). +[51] Mohammad Hossein Mazaheri, Alex Chen, and Omid Abari. 2021. mmTag: A Millimeter Wave Backscatter Network. In Proceedings of the 2021 ACM SIGCOMM 2021 Conference (SIGCOMM). +[52] Francesco Monticone and Andrea Alu. 2015. Leaky-wave Theory, Techniques, and Applications: From Microwaves to Visible Frequencies. Proc. IEEE 103, 5 (2015). +[53] Xin Na, Xiuzhen Guo, Zihao Yu, Jia Zhang, Yuan He, and Yunhao Liu. 2023. Leggiero: Analog WiFi Backscatter with Payload Transparency. In Proceedings of the 21st Annual International Conference on Mobile Systems, Applications and Services (MobiSys).
+[54] Sujay Narayana, Vijay Rao, R Venkatesha Prasad, Ajay K Kanthila, Kavya Managundi, Luca Mottola, and T Venkata Prabhakar. 2020. LOCI: Privacy-aware, Device-free, Low-power Localization of Multiple Persons Using IR Sensors. In Proceedings of the 19th International Conference on Information Processing in Sensor Networks (IPSN). +[55] John Nolan, Kun Qian, and Xinyu Zhang. 2021. RoS: Passive Smart Surface for Roadside-to-Vehicle Communication. In Proceedings of the 2021 ACM SIGCOMM 2021 Conference (SIGCOMM). +[56] Yuanchao Shu, Zhuqi Li, Borje Karlsson, Yiyong Lin, Thomas Moscibroda, and Kang Shin. [n.d.]. Incrementally-deployable Indoor Navigation with Automatic Trace Generation. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM). +[57] Elahe Soltanaghaei, Avinash Kalyanaraman, and Kamin Whitehouse. 2018. Multipath Triangulation: Decimeter-level WiFi Localization and Orientation with a Single Unaided Receiver. In Proceedings of the 16th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys). +[58] Elahe Soltanaghaei, Akarsh Prabhakara, Artur Balanuta, Matthew Anderson, Jan M Rabaey, Swarun Kumar, and Anthony Rowe. 2021. Millimetro: mmWave Retro-reflective Tags for Accurate, Long Range Localization. In Proceedings of the 27th Annual International Conference on Mobile Computing and Networking (MobiCom). +[59] Yimiao Sun, Weiguo Wang, Luca Mottola, Ruijin Wang, and Yuan He. 2022. AIM: Acoustic Inertial Measurement for Indoor Drone Localization and Tracking. In Proceedings of the 20th Conference on Embedded Networked Sensor Systems (SenSys). +[60] Huy Tran, Abhishek Mukherji, Nirupama Bulusu, Santosh Pandey, and Xu Zhang. 2019. Improving Infrastructure-based Indoor Positioning Systems with Device Motion Detection. In Proceedings of the 2019 IEEE International Conference on Pervasive Computing and Communications (PerCom). +[61] Ju Wang, Hongbo Jiang, Jie Xiong, Kyle Jamieson, Xiaojiang Chen, Dingyi Fang, and Binbin Xie. 2016. LiFS: Low Human-effort, Device-free Localization with Fine-grained Subcarrier Information. In Proceedings of the 22nd Annual International Conference on Mobile Computing and Networking (MobiCom). +[62] Weiguo Wang, Yuan He, Meng Jin, Yimiao Sun, and Xiuzhen Guo. 2023. Meta-Speaker: Acoustic Source Projection by Exploiting Air Nonlinearity. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking (MobiCom). +[63] Weiguo Wang, Luca Mottola, Yuan He, Jinming Li, Yimiao Sun, Shuai Li, Hua Jing, and Yulei Wang. 2022. MicNest: Long-range Instant Acoustic Localization of Drones in Precise Landing. In Proceedings of the 20th Conference on Embedded Networked Sensor Systems (SenSys). +[64] Yongyong Wei and Rong Zheng. 2020. Handling Device Heterogeneity in Wi-Fi Based Indoor Positioning Systems. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM). +[65] Yongyong Wei and Rong Zheng. 2021. Efficient Wi-Fi Fingerprint Crowdsourcing for Indoor Localization. IEEE Sensors Journal 22, 6 (2021), 5055-5062. +[66] Chenshu Wu, Jingao Xu, Zheng Yang, Nicholas D Lane, and Zuwei Yin. 2017. Gain without Pain: Accurate WiFi-based Localization Using Fingerprint Spatial Gradient. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 2 (2017), 1-19. +[67] Chenshu Wu, Zheng Yang, Zimu Zhou, Yunhao Liu, and Mingyan Liu. 2016. Mitigating Large Errors in WiFi-based Indoor Localization for Smartphones.
IEEE Transactions on Vehicular Technology 66, 7 (2016), 6246-6257. +[68] Yaxiong Xie, Jie Xiong, Mo Li, and Kyle Jamieson. 2019. mD-Track: Leveraging Multi-dimensionality for Passive Indoor Wi-Fi Tracking. In Proceedings of the 25th Annual International Conference on Mobile Computing and Networking (MobiCom). +[69] Jie Xiong and Kyle Jamieson. 2013. ArrayTrack: A Fine-grained Indoor Location System. In Proceedings of the 10th USENIX Symposium on Networked Systems Design and Implementation (NSDI). +[70] Jie Xiong, Karthikeyan Sundaresan, and Kyle Jamieson. 2015. ToneTrack: Leveraging Frequency-agile Radios for Time-based Indoor Wireless Localization. In Proceedings of the 21st Annual International Conference on Mobile Computing and Networking (MobiCom). +[71] Feng Xu and Ke Wu. 2013. Understanding Leaky-wave Structures: A Special Form of Guided-wave Structure. IEEE Microwave Magazine 14, 5 (2013). +[72] Han Xu, Zheng Yang, Zimu Zhou, Ke Yi, and Chunyi Peng. [n.d.]. TUM: Towards Ubiquitous Multi-device Localization for Cross-device Interaction. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM). +[73] Kun Yang, Xiaolong Zheng, Jie Xiong, Liang Liu, and Huadong Ma. 2022. WiImg: Pushing the Limit of WiFi Sensing with Low Transmission Rates. In Proceedings of the 19th Annual IEEE International Conference on Sensing, Communication, and Networking (SECON). +[74] Yu Yang, Yi Ding, Dengpan Yuan, Guang Wang, Xiaoyang Xie, Yunhuai Liu, Tian He, and Desheng Zhang. 2020. TransLoc: Transparent Indoor Localization with Uncertain Human Participation for Instant Delivery. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom). +[75] Zheng Yang, Zimu Zhou, and Yunhao Liu. 2013. From RSSI to CSI: Indoor Localization via Channel Response. Comput. Surveys 46, 2 (2013). +[76] Chia-Yi Yeh, Yasaman Ghasempour, Yasith Amarasinghe, Daniel M Mittleman, and Edward W Knightly. 2020. Security in Terahertz WLANs with Leaky Wave Antennas. In Proceedings of the 13th ACM Conference on Security and Privacy in Wireless and Mobile Networks (WiSec). +[77] Diana Zhang, Jingxian Wang, Junsu Jang, Junbo Zhang, and Swarun Kumar. 2019. On the Feasibility of Wi-Fi Based Material Sensing. In Proceedings of the 25th Annual International Conference on Mobile Computing and Networking (MobiCom). +[78] Jia Zhang, Xiuzhen Guo, Haotian Jiang, Xiaolong Zheng, and Yuan He. 2020. Link Quality Estimation of Cross-technology Communication. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM). 496-505. +[79] Jia Zhang, Xin Na, Rui Xi, Yimiao Sun, and Yuan He. 2023. mmHawkeye: Passive UAV Detection with a COTS mmWave Radar. In Proceedings of the 20th Annual IEEE International Conference on Sensing, Communication, and Networking (SECON). +[80] Jia Zhang, Rui Xi, Yuan He, Yimiao Sun, Xiuzhen Guo, Weiguo Wang, Xin Na, Yunhao Liu, Zhenguo Shi, and Tao Gu. 2023. A Survey of mmWave-based Human Sensing: Technology, Platforms and Applications. IEEE Communications Surveys & Tutorials (2023). +[81] Xianan Zhang, Wei Wang, Xuedou Xiao, Hang Yang, Xinyu Zhang, and Tao Jiang. 2020. Peer-to-Peer Localization for Single-antenna Devices. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 4, 3 (2020), 1-25. +[82] Zhenyong Zhang, Shibo He, Yuanchao Shu, and Zhiguo Shi. 2019. A Self-evolving WiFi-based Indoor Navigation System Using Smartphones. IEEE Transactions on Mobile Computing 19, 8 (2019), 1760-1774.
+[83] Tianyue Zheng, Zhe Chen, Jun Luo, Lin Ke, Chaoyang Zhao, and Yaowen Yang. 2021. SiWa: See into Walls via Deep UWB Radar. In Proceedings of the 27th Annual International Conference on Mobile Computing and Networking (MobiCom). +[84] Xiaolong Zheng, Jiliang Wang, Longfei Shangguan, Zimu Zhou, and Yunhao Liu. 2016. Smokey: Ubiquitous Smoking Detection with Commercial WiFi Infrastructures. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM). \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06311/images/1187de196e827f33d1610677a39a09bcda34961935e40152fb7ccba567ea5298.jpg b/data/2025/2504_06xxx/2504.06311/images/1187de196e827f33d1610677a39a09bcda34961935e40152fb7ccba567ea5298.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b1db2f961248f79fbc444c944213bf073ca102d7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/1187de196e827f33d1610677a39a09bcda34961935e40152fb7ccba567ea5298.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2559a7290ff3396ab2f831140df32a1a78d5806fc6e9c824aa0a48a2fda77fd +size 3409 diff --git a/data/2025/2504_06xxx/2504.06311/images/13b58135f6e1b371566e5ed5acca1a3ad836d23cf639ccd43f0c1935dc9868e1.jpg b/data/2025/2504_06xxx/2504.06311/images/13b58135f6e1b371566e5ed5acca1a3ad836d23cf639ccd43f0c1935dc9868e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dbcfe7435aab7f275e160b1aed283ba7f3ad2018 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/13b58135f6e1b371566e5ed5acca1a3ad836d23cf639ccd43f0c1935dc9868e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae7a738311fa7a12f1d2cdc59498908ad75d3b08fe0a005242e9880717488058 +size 19619 diff --git a/data/2025/2504_06xxx/2504.06311/images/16a38231d650bb86b5ff8ee17331187aa7c7d79c58ccc45b049d167c2786c7dd.jpg b/data/2025/2504_06xxx/2504.06311/images/16a38231d650bb86b5ff8ee17331187aa7c7d79c58ccc45b049d167c2786c7dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2885440596bd9cb8b9edde8b07ab896e7d2703e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/16a38231d650bb86b5ff8ee17331187aa7c7d79c58ccc45b049d167c2786c7dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa16c5ddcf15087dde3cc32d220b9633dae2a0fcd1f1fbc1e41ae4639d869691 +size 19727 diff --git a/data/2025/2504_06xxx/2504.06311/images/20417a2bd2ea76cbcf6c5dd2a19149fcaedc99406da14a30b7f2041a0516dc57.jpg b/data/2025/2504_06xxx/2504.06311/images/20417a2bd2ea76cbcf6c5dd2a19149fcaedc99406da14a30b7f2041a0516dc57.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2dabb8b1e80f9dd3a872e819fe794c4f96cc28d6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/20417a2bd2ea76cbcf6c5dd2a19149fcaedc99406da14a30b7f2041a0516dc57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5e868bdd1e17494daec5e352b308dd3e1ce9fa383dade7810e0a5d666f124b2 +size 4912 diff --git a/data/2025/2504_06xxx/2504.06311/images/20c13b4e9eca0eb9f70a5bb01dab7d7b75d52113724c5af0424c18db95b25e49.jpg b/data/2025/2504_06xxx/2504.06311/images/20c13b4e9eca0eb9f70a5bb01dab7d7b75d52113724c5af0424c18db95b25e49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36fae8bc515978999de23176ef0f86bf975aa9ca --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/20c13b4e9eca0eb9f70a5bb01dab7d7b75d52113724c5af0424c18db95b25e49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e76739ee2a189d5eb39bf2af1473593c39664cf7bcd5f63ed9bb15c75feb70e7 +size 19424 diff --git a/data/2025/2504_06xxx/2504.06311/images/2109f2078048336a2c4a81a265742db1ac720ab5b8a945f966c7e74d9179e936.jpg b/data/2025/2504_06xxx/2504.06311/images/2109f2078048336a2c4a81a265742db1ac720ab5b8a945f966c7e74d9179e936.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ad6e4e7abc314b3fc49e11ed36d06cf63c4523a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/2109f2078048336a2c4a81a265742db1ac720ab5b8a945f966c7e74d9179e936.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c911e051e4df9cd314e5a63dc1281d3cf219845e677b16d81662c697ffb35a5 +size 8909 diff --git a/data/2025/2504_06xxx/2504.06311/images/279daa2a6c56e4e94198173bb12843489cab9266b1f6d6bb7f2e5b98f6a29900.jpg b/data/2025/2504_06xxx/2504.06311/images/279daa2a6c56e4e94198173bb12843489cab9266b1f6d6bb7f2e5b98f6a29900.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93ab373ac3e9e07e3190c275ff54401aef6a8b08 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/279daa2a6c56e4e94198173bb12843489cab9266b1f6d6bb7f2e5b98f6a29900.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a10a8afbd61aab0091ff666396b63e3c4c981129539b76c5f4c9184d53d0d787 +size 16156 diff --git a/data/2025/2504_06xxx/2504.06311/images/2ce18e1841f8f48f59c2e0552990819ca20516f63df55589e295f5240d1087a7.jpg b/data/2025/2504_06xxx/2504.06311/images/2ce18e1841f8f48f59c2e0552990819ca20516f63df55589e295f5240d1087a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bae82de343b65974c74ab52ceac2a15bbfa755c8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/2ce18e1841f8f48f59c2e0552990819ca20516f63df55589e295f5240d1087a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:019022dc91fa07eb96bbea0f3da92b65587c28abfe66da0911e833253e7ebdce +size 22054 diff --git a/data/2025/2504_06xxx/2504.06311/images/352097b5d2f11c8cdaf6dd1ceacd68ed9b900106b469d063d16f21faf358a35f.jpg b/data/2025/2504_06xxx/2504.06311/images/352097b5d2f11c8cdaf6dd1ceacd68ed9b900106b469d063d16f21faf358a35f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dad2758977e89e21527b61eee29d2f44e48f4dbc --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/352097b5d2f11c8cdaf6dd1ceacd68ed9b900106b469d063d16f21faf358a35f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba47b1ccaaed7287331c5711def1712579169672d4115d01ee353f6cf012e647 +size 19424 diff --git a/data/2025/2504_06xxx/2504.06311/images/36b40804dcad7a772c81b3692c34777cfdd86cb7e5c8e87d5c00d280bc55e7a9.jpg b/data/2025/2504_06xxx/2504.06311/images/36b40804dcad7a772c81b3692c34777cfdd86cb7e5c8e87d5c00d280bc55e7a9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..452cc574d001493a8d396af57abce81114626cd0 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/36b40804dcad7a772c81b3692c34777cfdd86cb7e5c8e87d5c00d280bc55e7a9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e319bbe7898d7c6c6dc9d8ad473b94e7be13ca371f4af53f3fd2ef72a7217e9 +size 20995 diff --git a/data/2025/2504_06xxx/2504.06311/images/3946471648e2f53715aed9d26a903ab60efe071d01f317f4210258f9049122a7.jpg b/data/2025/2504_06xxx/2504.06311/images/3946471648e2f53715aed9d26a903ab60efe071d01f317f4210258f9049122a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ffc947f61f25c9f30a6702f2b2ab123128a1ae67 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06311/images/3946471648e2f53715aed9d26a903ab60efe071d01f317f4210258f9049122a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:548ff499afd305e3d23a62558786a673f36314800debae73fc4214433f92a263 +size 6746 diff --git a/data/2025/2504_06xxx/2504.06311/images/3b9a04b650c01395686d3a9bfdacfae1cbb82a50279fcfc2bd4f9817798c7629.jpg b/data/2025/2504_06xxx/2504.06311/images/3b9a04b650c01395686d3a9bfdacfae1cbb82a50279fcfc2bd4f9817798c7629.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a136c54169bb209736beff48946838967f923160 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/3b9a04b650c01395686d3a9bfdacfae1cbb82a50279fcfc2bd4f9817798c7629.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d5238c421a0366d8391f0791eec080e58d9f53350e2e289442bee18783d1c4c +size 7731 diff --git a/data/2025/2504_06xxx/2504.06311/images/3f5c13107fec7b40dfb8852b2b1b4d06b84994c45e0842a9735a85ba38e37399.jpg b/data/2025/2504_06xxx/2504.06311/images/3f5c13107fec7b40dfb8852b2b1b4d06b84994c45e0842a9735a85ba38e37399.jpg new file mode 100644 index 0000000000000000000000000000000000000000..caf6c98ef07684394e05a38443eac55a9c36a42f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/3f5c13107fec7b40dfb8852b2b1b4d06b84994c45e0842a9735a85ba38e37399.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9400735d974ffb5c4b593ab08c93284ec215526aebb640251ddb26594287bf40 +size 4335 diff --git a/data/2025/2504_06xxx/2504.06311/images/3ff4fad4b6922ac9b775808d80c2e52e64e0bdd0222238fb46302e9792d486a7.jpg b/data/2025/2504_06xxx/2504.06311/images/3ff4fad4b6922ac9b775808d80c2e52e64e0bdd0222238fb46302e9792d486a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..184c5763c36e13decb95eef8a3378ec0a3557612 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/3ff4fad4b6922ac9b775808d80c2e52e64e0bdd0222238fb46302e9792d486a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5bff36d81025fe9d3b92a0a3838835fbfe727fa1d636f37d00b852780b15fde +size 19179 diff --git a/data/2025/2504_06xxx/2504.06311/images/41240c86f3dc7ef92bd433d9ea054df14aee43845c5886815c7a746fcfd18272.jpg b/data/2025/2504_06xxx/2504.06311/images/41240c86f3dc7ef92bd433d9ea054df14aee43845c5886815c7a746fcfd18272.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1879ade5fdf688243fdf50714d4ce817da30a5ba --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/41240c86f3dc7ef92bd433d9ea054df14aee43845c5886815c7a746fcfd18272.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21d15ad0dca03b9ab10d6f109fcddc1d45c7023f4b2312cdc3388405b6cc41ee +size 22147 diff --git a/data/2025/2504_06xxx/2504.06311/images/46dc08a5fc2398f6e3fd535ac2a3b8f880e1ed26ef679898911b6677925990eb.jpg b/data/2025/2504_06xxx/2504.06311/images/46dc08a5fc2398f6e3fd535ac2a3b8f880e1ed26ef679898911b6677925990eb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78872313a221b42706ef1b8e690d5ff560f06d01 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/46dc08a5fc2398f6e3fd535ac2a3b8f880e1ed26ef679898911b6677925990eb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3188fb01a477ca1e44026a2eadc12f9f5e533d40813a67367d3c9ef8670b5738 +size 20777 diff --git a/data/2025/2504_06xxx/2504.06311/images/5288f746a3d9c59551d74b46534fb59e7bfd78a7e34567f04627a892543b58df.jpg 
b/data/2025/2504_06xxx/2504.06311/images/5288f746a3d9c59551d74b46534fb59e7bfd78a7e34567f04627a892543b58df.jpg new file mode 100644 index 0000000000000000000000000000000000000000..899289328d2305ba2d65d3c1cefa8eeb0cb61a6e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/5288f746a3d9c59551d74b46534fb59e7bfd78a7e34567f04627a892543b58df.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:beaa75f9c3f26339e4be08a6a373486e1ffbf4e7d17010df10a2e135e95b810e +size 17061 diff --git a/data/2025/2504_06xxx/2504.06311/images/5bb17e241be2f79ada098e07caca36cc7af2ffba3ddb0c74fca93996c1870e12.jpg b/data/2025/2504_06xxx/2504.06311/images/5bb17e241be2f79ada098e07caca36cc7af2ffba3ddb0c74fca93996c1870e12.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab2db6da75a157dedf2c0e8815b9c4bd051d61a9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/5bb17e241be2f79ada098e07caca36cc7af2ffba3ddb0c74fca93996c1870e12.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c989b43ad2b54b45706f969d860810767dd7f31f2350ac8b6da1e74ec7353599 +size 13107 diff --git a/data/2025/2504_06xxx/2504.06311/images/5f2a96e0c1af772f0330b928316bb4518b1496e40fd924527949102072aa6cf5.jpg b/data/2025/2504_06xxx/2504.06311/images/5f2a96e0c1af772f0330b928316bb4518b1496e40fd924527949102072aa6cf5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3cd356cd7575d79b1f70ec382acec29e2df8c2a6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/5f2a96e0c1af772f0330b928316bb4518b1496e40fd924527949102072aa6cf5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab76fb4313519a72dd12a6b4f328749957388364922910c63237e71004387d16 +size 20458 diff --git a/data/2025/2504_06xxx/2504.06311/images/60f56b36363b5e62b664b4fda9fd7d15b212e2b11c0e085fa5105f820f27e554.jpg b/data/2025/2504_06xxx/2504.06311/images/60f56b36363b5e62b664b4fda9fd7d15b212e2b11c0e085fa5105f820f27e554.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2162dd1758efcb8cb5f380c809bd22a920558402 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/60f56b36363b5e62b664b4fda9fd7d15b212e2b11c0e085fa5105f820f27e554.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bea3f6c6a67156d4e1334b2350d15a24883cf6ffc8333dd34001a016628b5fb +size 12054 diff --git a/data/2025/2504_06xxx/2504.06311/images/67c7cfcad83ea3c2c70ebae1a6e28f6ac8a004921890e9bd31e0f8baa85750ab.jpg b/data/2025/2504_06xxx/2504.06311/images/67c7cfcad83ea3c2c70ebae1a6e28f6ac8a004921890e9bd31e0f8baa85750ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..080f0309959b28a17bbdf426390315b1609827af --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/67c7cfcad83ea3c2c70ebae1a6e28f6ac8a004921890e9bd31e0f8baa85750ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d10d078f7d5a82e5129f056c20a284a0f6901105971555a5f8e91062f0624370 +size 20226 diff --git a/data/2025/2504_06xxx/2504.06311/images/6bb200cf265600ee644a5c41e6e2c4fcc79f9f4fe3811f9f401168875b39ae3c.jpg b/data/2025/2504_06xxx/2504.06311/images/6bb200cf265600ee644a5c41e6e2c4fcc79f9f4fe3811f9f401168875b39ae3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4690eb62824a0b1d9139f17d95c14c67605a0118 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/6bb200cf265600ee644a5c41e6e2c4fcc79f9f4fe3811f9f401168875b39ae3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a6261181d38e2741bce534ee648e5171c49831824162434fe49465bdc4217f3e +size 12459 diff --git a/data/2025/2504_06xxx/2504.06311/images/6fb82dec0dccfa3d8c1ed30bcc9878629f29b2090b1d4daa1b5f62c3ee02be85.jpg b/data/2025/2504_06xxx/2504.06311/images/6fb82dec0dccfa3d8c1ed30bcc9878629f29b2090b1d4daa1b5f62c3ee02be85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a583738209defc27af229c3afa5f49a162a3e1d8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/6fb82dec0dccfa3d8c1ed30bcc9878629f29b2090b1d4daa1b5f62c3ee02be85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:489ece8e13915dd2684f02d82fb7f39404e0e009176e19325f7e8933d0d6ee14 +size 19713 diff --git a/data/2025/2504_06xxx/2504.06311/images/75fa5e29537f55972cfe2ab54947c4583cfc8202904c13b1ea132fa3d329ee66.jpg b/data/2025/2504_06xxx/2504.06311/images/75fa5e29537f55972cfe2ab54947c4583cfc8202904c13b1ea132fa3d329ee66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..536233e53582535bf05ca040372de9c1cf480cdf --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/75fa5e29537f55972cfe2ab54947c4583cfc8202904c13b1ea132fa3d329ee66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:237bc54b17f775b5405b5769eb0be4cae3ad8bd7c592d4fec1225af5bb3acc58 +size 4022 diff --git a/data/2025/2504_06xxx/2504.06311/images/75fa902b814efe455e4e687ee56b920319ee15e0c247276fed013941dd966a6b.jpg b/data/2025/2504_06xxx/2504.06311/images/75fa902b814efe455e4e687ee56b920319ee15e0c247276fed013941dd966a6b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e2eef9f95139a78f04093c979a9d14fd7bcfab9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/75fa902b814efe455e4e687ee56b920319ee15e0c247276fed013941dd966a6b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5389b5b3bd43a5f1c301ef04481b50a7bf91be3b0ffaa8ffb9e8ba0e9b69b0b +size 20372 diff --git a/data/2025/2504_06xxx/2504.06311/images/77325214c9a5a697d9c27756c1cfc8c1669913385909b07abdad9bc4bfa1f232.jpg b/data/2025/2504_06xxx/2504.06311/images/77325214c9a5a697d9c27756c1cfc8c1669913385909b07abdad9bc4bfa1f232.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c235127f97801bef1b41f31682dc80fc7a7c5c5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/77325214c9a5a697d9c27756c1cfc8c1669913385909b07abdad9bc4bfa1f232.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24a20e397d9912c760f9a10105982aa878eff2bc8a8e86590de815b224beece3 +size 15513 diff --git a/data/2025/2504_06xxx/2504.06311/images/790477f0bf5b12003888f702c24b80b8190c976c83a56c38334149a394dda126.jpg b/data/2025/2504_06xxx/2504.06311/images/790477f0bf5b12003888f702c24b80b8190c976c83a56c38334149a394dda126.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2eb1d49741d916dfaba1ad0b4e2c481a6968366 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/790477f0bf5b12003888f702c24b80b8190c976c83a56c38334149a394dda126.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71269fe7ed9b98771450184b51808b51ff913731879ab69efe9eaafee572e2f4 +size 7260 diff --git a/data/2025/2504_06xxx/2504.06311/images/79105e03737063bf0d3574cf9f3347ffdb081d0f7005a9b3f158116807912a80.jpg b/data/2025/2504_06xxx/2504.06311/images/79105e03737063bf0d3574cf9f3347ffdb081d0f7005a9b3f158116807912a80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39b4ee525dce8515b692e6be33633aaedc99cc81 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06311/images/79105e03737063bf0d3574cf9f3347ffdb081d0f7005a9b3f158116807912a80.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:825cf1af0789c9bebe7d2f50d9349d36b346660186211be4b491b8cf79ddd09a +size 13014 diff --git a/data/2025/2504_06xxx/2504.06311/images/7d0df6daa0e60f943e48d51af1e118545c15659453e80866a132fc5dabc9f7dc.jpg b/data/2025/2504_06xxx/2504.06311/images/7d0df6daa0e60f943e48d51af1e118545c15659453e80866a132fc5dabc9f7dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..000b44d679ecd1777c64d2d1b4226d583bf4cfb3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/7d0df6daa0e60f943e48d51af1e118545c15659453e80866a132fc5dabc9f7dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6b5e12bb9802e2f049890feaa4bf3c12a7dc2e1df7dc0205916ad799e51b142 +size 8343 diff --git a/data/2025/2504_06xxx/2504.06311/images/823419a8c889392fc2b02cd1e4c1b440b94537a0158211bb0cbf7bc569ae4ec1.jpg b/data/2025/2504_06xxx/2504.06311/images/823419a8c889392fc2b02cd1e4c1b440b94537a0158211bb0cbf7bc569ae4ec1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6eb31281754aad39a218d607123151e8b17e5e3e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/823419a8c889392fc2b02cd1e4c1b440b94537a0158211bb0cbf7bc569ae4ec1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54d26ef967920159814dfa1844d1ddc4cc30d19e052520ea001c64f03bf6a1e7 +size 39684 diff --git a/data/2025/2504_06xxx/2504.06311/images/8440172285ba16eaeb2c520a87c7567b6450151cb2e9e25468a2c653a068c295.jpg b/data/2025/2504_06xxx/2504.06311/images/8440172285ba16eaeb2c520a87c7567b6450151cb2e9e25468a2c653a068c295.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d1137a75dd870aab83831fca21fee6bc9115b9f4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/8440172285ba16eaeb2c520a87c7567b6450151cb2e9e25468a2c653a068c295.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd32c4debc1a710c7999e4cf32258c309a744518ab3d3d6801f6a3057b1da07d +size 19797 diff --git a/data/2025/2504_06xxx/2504.06311/images/908672f03a05015deaed74a1844456cfe761d014f09263752aad0d47f951d0ba.jpg b/data/2025/2504_06xxx/2504.06311/images/908672f03a05015deaed74a1844456cfe761d014f09263752aad0d47f951d0ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b3fdc01f432e4334007a5fe5f35a0415a07bd3e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/908672f03a05015deaed74a1844456cfe761d014f09263752aad0d47f951d0ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7ac7f85416f4f6f2f8eb2db75499c6e9c6542dc2494b6ec7ff14007314dcfd0 +size 4229 diff --git a/data/2025/2504_06xxx/2504.06311/images/921fa998df71dbda1001c882766862254b51e8730c8684b124dbf457256bc250.jpg b/data/2025/2504_06xxx/2504.06311/images/921fa998df71dbda1001c882766862254b51e8730c8684b124dbf457256bc250.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8dafa432d49dcbb4f4b204f5a000739469823bec --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/921fa998df71dbda1001c882766862254b51e8730c8684b124dbf457256bc250.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e794a087de1546f7096680721b574e134d155fe2eea980abfd677daaa2804597 +size 19399 diff --git a/data/2025/2504_06xxx/2504.06311/images/96f8834ba4b59d1758ae3218cacfd6e013ddbc0795a8006f7b66b552dc15065a.jpg 
b/data/2025/2504_06xxx/2504.06311/images/96f8834ba4b59d1758ae3218cacfd6e013ddbc0795a8006f7b66b552dc15065a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fdd9c9c60cc6ea2ad644b678597cc9cc5e9a08fa --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/96f8834ba4b59d1758ae3218cacfd6e013ddbc0795a8006f7b66b552dc15065a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb67caf7a226a5af2a385f34c4828674b9f0531fd4202eb219bb7ec195ed0b76 +size 4359 diff --git a/data/2025/2504_06xxx/2504.06311/images/9c50c21f9f2753fbea75feba6489560ab1bb6bd4e497c4e119371cab5de559c0.jpg b/data/2025/2504_06xxx/2504.06311/images/9c50c21f9f2753fbea75feba6489560ab1bb6bd4e497c4e119371cab5de559c0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9096a0c749dabe8a395e03ad1717efa0152a7fb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/9c50c21f9f2753fbea75feba6489560ab1bb6bd4e497c4e119371cab5de559c0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9fba32250789d1180f3b2895c715f05bd84707503b6de527bd232218c074ac2 +size 56601 diff --git a/data/2025/2504_06xxx/2504.06311/images/a4811d8b72e0b779962645388f118c8dadfe40b3817213814d85446162eba8f8.jpg b/data/2025/2504_06xxx/2504.06311/images/a4811d8b72e0b779962645388f118c8dadfe40b3817213814d85446162eba8f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba8dd978cc62453caa94455daa3079347ed2ae77 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/a4811d8b72e0b779962645388f118c8dadfe40b3817213814d85446162eba8f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f244c39a77f217337118ca1c960117771875c18e75ad5ff9ca31764334b6e33 +size 8035 diff --git a/data/2025/2504_06xxx/2504.06311/images/ac1f54c4acd4c26f1fbb5b2b4901becd11f79892dd7ce7e313931dd158e5060a.jpg b/data/2025/2504_06xxx/2504.06311/images/ac1f54c4acd4c26f1fbb5b2b4901becd11f79892dd7ce7e313931dd158e5060a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca7a59c4877ee3d406a174bf0dd2c2466f9b82e6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/ac1f54c4acd4c26f1fbb5b2b4901becd11f79892dd7ce7e313931dd158e5060a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6dc41039f473a8099f37df62980fe728c59d8a246464ff8d26908641b2ffce2 +size 31006 diff --git a/data/2025/2504_06xxx/2504.06311/images/b24fbb91f95a7b3b3fd9b8a04a914b391287c260958fa8809ac3f9870b57a917.jpg b/data/2025/2504_06xxx/2504.06311/images/b24fbb91f95a7b3b3fd9b8a04a914b391287c260958fa8809ac3f9870b57a917.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d846c61b424cadf33a60a9dcbc4dc6812cc1e12 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/b24fbb91f95a7b3b3fd9b8a04a914b391287c260958fa8809ac3f9870b57a917.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1e596628dcff7a0ddf6cb3ca03abaee9dec92ee65dd548d662d4bdeb9172fc8 +size 15109 diff --git a/data/2025/2504_06xxx/2504.06311/images/b8947759e9bead7e2b64a1301febbf053eed3a0fa8aef46ad73d69ef35fc88f1.jpg b/data/2025/2504_06xxx/2504.06311/images/b8947759e9bead7e2b64a1301febbf053eed3a0fa8aef46ad73d69ef35fc88f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a9fa1983013490fc141a4b981372c44f6966474 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/b8947759e9bead7e2b64a1301febbf053eed3a0fa8aef46ad73d69ef35fc88f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e7e31ebc1944500dd93a73764204956e134f80fa98ced19762426a0f4b894af4 +size 15764 diff --git a/data/2025/2504_06xxx/2504.06311/images/b91b6ec0b5eab711dad3ae20feabbddc67cfc373e1242347c7a94cc1e9363cd3.jpg b/data/2025/2504_06xxx/2504.06311/images/b91b6ec0b5eab711dad3ae20feabbddc67cfc373e1242347c7a94cc1e9363cd3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..659bc4453a8a003b08c5aed47b8d47855dd35be3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/b91b6ec0b5eab711dad3ae20feabbddc67cfc373e1242347c7a94cc1e9363cd3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87db4a566a4382ff111203144684b069f81b4dd9f79e85fef8f14e31721a514c +size 8279 diff --git a/data/2025/2504_06xxx/2504.06311/images/bbed68b7c7c2a6dffa3e458e459a9cd13deffd3c1a4950f2c36f70d280dc1402.jpg b/data/2025/2504_06xxx/2504.06311/images/bbed68b7c7c2a6dffa3e458e459a9cd13deffd3c1a4950f2c36f70d280dc1402.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bbf7a6f3ba77bc80a9a9f89bf44063edcb6a1299 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/bbed68b7c7c2a6dffa3e458e459a9cd13deffd3c1a4950f2c36f70d280dc1402.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a2d7f9bb34e84774cb61f5b70375c5c608901592f14b79ec7249b69baf6cfb9 +size 14872 diff --git a/data/2025/2504_06xxx/2504.06311/images/bc61a0518220150b607c1d66cc7dc6947f380c2b1009e36ae95bd6ebef2eccde.jpg b/data/2025/2504_06xxx/2504.06311/images/bc61a0518220150b607c1d66cc7dc6947f380c2b1009e36ae95bd6ebef2eccde.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a9652a42f505de350b13f108d34e5c21117523b7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/bc61a0518220150b607c1d66cc7dc6947f380c2b1009e36ae95bd6ebef2eccde.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e81b31eb0221aea909f21fa7f569e71ca80c9ccbc66574a5a2f4852e8bd3912b +size 25723 diff --git a/data/2025/2504_06xxx/2504.06311/images/c0ca63a466b48a65700cdca67ed5ce4af8d69cd93ba98cefd9921e9d28cfe97c.jpg b/data/2025/2504_06xxx/2504.06311/images/c0ca63a466b48a65700cdca67ed5ce4af8d69cd93ba98cefd9921e9d28cfe97c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce80f08ef3e6d3ab3cf9bf6974b67401d3bd2ff6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/c0ca63a466b48a65700cdca67ed5ce4af8d69cd93ba98cefd9921e9d28cfe97c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82def6aaac831048dc71074496527cefb1a58658b1ccaeb344a244a0eaf3c95d +size 6634 diff --git a/data/2025/2504_06xxx/2504.06311/images/c4cebafb43021fd59b73e2a54abe86e3e358b8ba47278543a6279387ef8f17fb.jpg b/data/2025/2504_06xxx/2504.06311/images/c4cebafb43021fd59b73e2a54abe86e3e358b8ba47278543a6279387ef8f17fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4f581e3550ca9800855f77b64cb282a3e43e54c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/c4cebafb43021fd59b73e2a54abe86e3e358b8ba47278543a6279387ef8f17fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6f1dcf18d3cc78bff1f2ccdf04be17302a2c98969b1318ba832188e8f45ccf1 +size 16627 diff --git a/data/2025/2504_06xxx/2504.06311/images/dd3fffaab64c2aa57f33b3095c34259cf6579acc3062073b260c8ec1623d4fbf.jpg b/data/2025/2504_06xxx/2504.06311/images/dd3fffaab64c2aa57f33b3095c34259cf6579acc3062073b260c8ec1623d4fbf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ef48a9937921c3d53a8eb4e72138dfe907aed6f --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06311/images/dd3fffaab64c2aa57f33b3095c34259cf6579acc3062073b260c8ec1623d4fbf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3155b35940b4547f3c1fb4cd7b69bdd5612e6b76af1ae4e7dd2541c2c7fc385 +size 9852 diff --git a/data/2025/2504_06xxx/2504.06311/images/de065f1018fd63684ca76984649d3bbde8bb74dc9f72d0dd5cb8341546d23646.jpg b/data/2025/2504_06xxx/2504.06311/images/de065f1018fd63684ca76984649d3bbde8bb74dc9f72d0dd5cb8341546d23646.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d7338372193dc7ef9552312b1da21cc22b2c14f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/de065f1018fd63684ca76984649d3bbde8bb74dc9f72d0dd5cb8341546d23646.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c2ab411c9efda7976f7de6c309684dcdd5aa88cc36311e8b261818cd0fccfca +size 11707 diff --git a/data/2025/2504_06xxx/2504.06311/images/e14dd07adfafe5f6159b6067571cb4b5d052bc770da68484928b81b7d7546a66.jpg b/data/2025/2504_06xxx/2504.06311/images/e14dd07adfafe5f6159b6067571cb4b5d052bc770da68484928b81b7d7546a66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ec8c496f8233e8bf5664023cae7e2a8fd3198f3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/e14dd07adfafe5f6159b6067571cb4b5d052bc770da68484928b81b7d7546a66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4813d8fbed0599fbe595abe17a93098bebc48d029c7cfcbe7450fb5a22b71318 +size 26211 diff --git a/data/2025/2504_06xxx/2504.06311/images/e49e4eeeefff0ee45a136194aac6af3b509d72117db7264bff2f5e731f512942.jpg b/data/2025/2504_06xxx/2504.06311/images/e49e4eeeefff0ee45a136194aac6af3b509d72117db7264bff2f5e731f512942.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20b6a30891b668ed7eaf84b769d5291f07a8150f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/e49e4eeeefff0ee45a136194aac6af3b509d72117db7264bff2f5e731f512942.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:734737c70f63fd183c6b38c814afda492777780ab1e62dab870472ebe4df0c32 +size 8905 diff --git a/data/2025/2504_06xxx/2504.06311/images/eefd18541489eb0898ad31fa879c5762152ff2479e5c18822d2667f50c066d6b.jpg b/data/2025/2504_06xxx/2504.06311/images/eefd18541489eb0898ad31fa879c5762152ff2479e5c18822d2667f50c066d6b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b725e1d038909ce51dfdea8e8c9fc412b86ae9a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/eefd18541489eb0898ad31fa879c5762152ff2479e5c18822d2667f50c066d6b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f39e5f2c2e4d823704048eb7c5fd3e34575dabcf52281a535fe66bb0f5883998 +size 29317 diff --git a/data/2025/2504_06xxx/2504.06311/images/f1b54cdfe48e3f891784027e9e717ec71afd8653ef63103aea8b00ad7e668023.jpg b/data/2025/2504_06xxx/2504.06311/images/f1b54cdfe48e3f891784027e9e717ec71afd8653ef63103aea8b00ad7e668023.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a495fd9b6d1c79be33fe26511bf835a4f76f5366 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/f1b54cdfe48e3f891784027e9e717ec71afd8653ef63103aea8b00ad7e668023.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddad539115a2104181445f4d0034f4334fb984f24e7af85d01f8723a24703cde +size 10473 diff --git a/data/2025/2504_06xxx/2504.06311/images/f242b53b6c4f836190beb8603fe576dde41c2a973fa828c47af862af1cdb72d7.jpg 
b/data/2025/2504_06xxx/2504.06311/images/f242b53b6c4f836190beb8603fe576dde41c2a973fa828c47af862af1cdb72d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c65b33cef70f2cf06618f54e9e9b4c42ee7ae975 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/images/f242b53b6c4f836190beb8603fe576dde41c2a973fa828c47af862af1cdb72d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16fa92fdf839f78f332808f67f2366a4a58479b619438c5049dac03671bc2686 +size 10079 diff --git a/data/2025/2504_06xxx/2504.06311/layout.json b/data/2025/2504_06xxx/2504.06311/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..647052af68df9eeb2bdd6188cce5d84cc390d84f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06311/layout.json @@ -0,0 +1,18040 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 108, + 75, + 501, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 75, + 501, + 113 + ], + "spans": [ + { + "bbox": [ + 108, + 75, + 501, + 113 + ], + "type": "text", + "content": "Bifrost: Reinventing WiFi Signals Based on Dispersion Effect for Accurate Indoor Localization" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 84, + 125, + 525, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 125, + 525, + 138 + ], + "spans": [ + { + "bbox": [ + 84, + 125, + 525, + 138 + ], + "type": "text", + "content": "Yimiao Sun, Yuan He*, Jiacheng Zhang, Xin Na, Yande Chen, Weiguo Wang, Xiuzhen Guo" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 194, + 139, + 416, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 139, + 416, + 151 + ], + "spans": [ + { + "bbox": [ + 194, + 139, + 416, + 151 + ], + "type": "text", + "content": "School of Software and BNrist, Tsinghua University" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 189, + 152, + 422, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 152, + 422, + 163 + ], + "spans": [ + { + "bbox": [ + 189, + 152, + 422, + 163 + ], + "type": "text", + "content": "sym21@mails.tsinghua.edu.cn, heyuan@tsinghua.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 136, + 163, + 474, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 163, + 474, + 175 + ], + "spans": [ + { + "bbox": [ + 136, + 163, + 474, + 175 + ], + "type": "text", + "content": "{zhangjc21,nx20,cyd22,wwg18}@mails.tsinghua.edu.cn, guoxiuzhen94@gmail.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 182, + 115, + 193 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 182, + 115, + 193 + ], + "spans": [ + { + "bbox": [ + 51, + 182, + 115, + 193 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 197, + 296, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 197, + 296, + 470 + ], + "spans": [ + { + "bbox": [ + 50, + 197, + 296, + 470 + ], + "type": "text", + "content": "WiFi-based device localization is a key enabling technology for smart applications, which has attracted numerous research studies in the past decade. Most of the existing approaches rely on Line-of-Sight (LoS) signals to work, while a critical problem is often neglected: In the real-world indoor environments, WiFi signals are everywhere, but very few of them are usable for accurate localization. As a result, the localization accuracy in practice is far from being satisfactory. 
This paper presents Bifrost, a novel hardware-software co-design for accurate indoor localization. The core idea of Bifrost is to reinvent WiFi signals, so as to provide sufficient LoS signals for localization. This is realized by exploiting the dispersion effect of signals emitted by the leaky wave antenna (LWA). We present a low-cost plug-in design of LWA that can generate orthogonal polarized signals: On one hand, LWA disperses signals of different frequencies to different angles, thus providing Angle-of-Arrival (AoA) information for the localized target. On the other hand, the target further leverages the antenna polarization mismatch to distinguish AoAs from different LWAs. In the software layer, fine-grained information in Channel State Information (CSI) is exploited to cope with multipath and noise. We implement Bifrost and evaluate its performance under various settings. The results show that the median localization error of Bifrost is " + }, + { + "bbox": [ + 50, + 197, + 296, + 470 + ], + "type": "inline_equation", + "content": "0.81\\mathrm{m}" + }, + { + "bbox": [ + 50, + 197, + 296, + 470 + ], + "type": "text", + "content": ", which is " + }, + { + "bbox": [ + 50, + 197, + 296, + 470 + ], + "type": "inline_equation", + "content": "52.35\\%" + }, + { + "bbox": [ + 50, + 197, + 296, + 470 + ], + "type": "text", + "content": " less than that of SpotFi, a state-of-the-art approach. SpotFi, when combined with Bifrost to work in the realistic settings, can reduce the localization error by " + }, + { + "bbox": [ + 50, + 197, + 296, + 470 + ], + "type": "inline_equation", + "content": "33.54\\%" + }, + { + "bbox": [ + 50, + 197, + 296, + 470 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 481, + 134, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 481, + 134, + 491 + ], + "spans": [ + { + "bbox": [ + 51, + 481, + 134, + 491 + ], + "type": "text", + "content": "CCS CONCEPTS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 497, + 293, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 497, + 293, + 518 + ], + "spans": [ + { + "bbox": [ + 51, + 497, + 293, + 518 + ], + "type": "text", + "content": "- Networks " + }, + { + "bbox": [ + 51, + 497, + 293, + 518 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 51, + 497, + 293, + 518 + ], + "type": "text", + "content": " Location based services; " + }, + { + "bbox": [ + 51, + 497, + 293, + 518 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 51, + 497, + 293, + 518 + ], + "type": "text", + "content": " Information systems " + }, + { + "bbox": [ + 51, + 497, + 293, + 518 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 51, + 497, + 293, + 518 + ], + "type": "text", + "content": " Location based services;" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 529, + 120, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 529, + 120, + 540 + ], + "spans": [ + { + "bbox": [ + 51, + 529, + 120, + 540 + ], + "type": "text", + "content": "KEYWORDS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 544, + 294, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 544, + 294, + 566 + ], + "spans": [ + { + "bbox": [ + 50, + 544, + 294, + 566 + ], + "type": "text", + "content": "WiFi Localization, Indoor Localization, Leaky Wave Antenna, RF Computing" + } + ] + } 
+ ], + "index": 10 + }, + { + "bbox": [ + 51, + 578, + 162, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 578, + 162, + 589 + ], + "spans": [ + { + "bbox": [ + 51, + 578, + 162, + 589 + ], + "type": "inline_equation", + "content": "^{\\dagger}" + }, + { + "bbox": [ + 51, + 578, + 162, + 589 + ], + "type": "text", + "content": "Yuan He is the corresponding author." + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 55, + 620, + 129, + 647 + ], + "blocks": [ + { + "bbox": [ + 55, + 620, + 129, + 647 + ], + "lines": [ + { + "bbox": [ + 55, + 620, + 129, + 647 + ], + "spans": [ + { + "bbox": [ + 55, + 620, + 129, + 647 + ], + "type": "image", + "image_path": "908672f03a05015deaed74a1844456cfe761d014f09263752aad0d47f951d0ba.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 649, + 294, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 649, + 294, + 668 + ], + "spans": [ + { + "bbox": [ + 52, + 649, + 294, + 668 + ], + "type": "text", + "content": "This work is licensed under a Creative Commons Attribution International 4.0 License." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 669, + 198, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 669, + 198, + 677 + ], + "spans": [ + { + "bbox": [ + 52, + 669, + 198, + 677 + ], + "type": "text", + "content": "SenSys'23,November 12-17,2023,Istanbul,Turkiye" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 677, + 192, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 677, + 192, + 685 + ], + "spans": [ + { + "bbox": [ + 52, + 677, + 192, + 685 + ], + "type": "text", + "content": "© 2023 Copyright held by the owner/author(s)." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 685, + 176, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 685, + 176, + 693 + ], + "spans": [ + { + "bbox": [ + 52, + 685, + 176, + 693 + ], + "type": "text", + "content": "ACM ISBN 979-8-4007-0414-7/23/11...$15.00" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 693, + 179, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 179, + 701 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 179, + 701 + ], + "type": "text", + "content": "https://doi.org/10.1145/3625687.3625786" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 340, + 180, + 531, + 287 + ], + "blocks": [ + { + "bbox": [ + 340, + 180, + 531, + 287 + ], + "lines": [ + { + "bbox": [ + 340, + 180, + 531, + 287 + ], + "spans": [ + { + "bbox": [ + 340, + 180, + 531, + 287 + ], + "type": "image", + "image_path": "5288f746a3d9c59551d74b46534fb59e7bfd78a7e34567f04627a892543b58df.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 413, + 290, + 423, + 299 + ], + "lines": [ + { + "bbox": [ + 413, + 290, + 423, + 299 + ], + "spans": [ + { + "bbox": [ + 413, + 290, + 423, + 299 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 338, + 307, + 515, + 414 + ], + "blocks": [ + { + "bbox": [ + 338, + 307, + 515, + 414 + ], + "lines": [ + { + "bbox": [ + 338, + 307, + 515, + 414 + ], + "spans": [ + { + "bbox": [ + 338, + 307, + 515, + 414 + ], + "type": "image", + "image_path": "3ff4fad4b6922ac9b775808d80c2e52e64e0bdd0222238fb46302e9792d486a7.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 413, + 419, + 424, + 428 + ], + "lines": [ + { + "bbox": [ + 413, + 419, + 424, + 428 + ], + "spans": [ + { + "bbox": [ + 413, + 419, + 424, + 428 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 314, + 431, + 559, + 461 + ], + "lines": [ + { + "bbox": [ + 314, + 431, + 559, + 461 + ], + "spans": [ + { + "bbox": [ + 314, + 431, + 559, + 461 + ], + "type": "text", + "content": "Figure 1: A model-driven method works well when (a) sufficient LoS signals are available but becomes inaccurate when (b) NLoS signals have to be used." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 476, + 405, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 476, + 405, + 485 + ], + "spans": [ + { + "bbox": [ + 315, + 476, + 405, + 485 + ], + "type": "text", + "content": "ACM Reference Format:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 486, + 560, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 486, + 560, + 545 + ], + "spans": [ + { + "bbox": [ + 314, + 486, + 560, + 545 + ], + "type": "text", + "content": "Yimiao Sun, Yuan He, Jiacheng Zhang, Xin Na, Yande Chen, Weiguo Wang, Xiuzhen Guo. 2023. Bifrost: Reinventing WiFi Signals Based on Dispersion Effect for Accurate Indoor Localization. In The 21st ACM Conference on Embedded Networked Sensor Systems (SenSys '23), November 12-17, 2023, Istanbul, Türkiye. ACM, New York, NY, USA, 14 pages. 
https://doi.org/10.1145/3625687.3625786" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 555, + 427, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 555, + 427, + 567 + ], + "spans": [ + { + "bbox": [ + 315, + 555, + 427, + 567 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 570, + 559, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 559, + 668 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 559, + 668 + ], + "type": "text", + "content": "Location information [32, 63, 79, 80] is crucial, especially for smart indoor applications [50, 60, 67, 72], such as smart home [54, 62], indoor navigation [7, 18, 19, 59] and so on. Due to the ubiquitous deployment of WiFi access points (APs) and wide availability of WiFi modules on the devices, WiFi-based localization [16, 25, 49, 56, 57, 61, 64, 65, 68-70, 73, 74, 82] appears to be promising for indoor localization. The existing works of WiFi-based indoor localization can be broadly grouped into two categories, data-driven methods and model-driven methods." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 669, + 559, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 669, + 559, + 702 + ], + "spans": [ + { + "bbox": [ + 314, + 669, + 559, + 702 + ], + "type": "text", + "content": "Data-driven methods are typically represented by fingerprint [14, 44, 61]. These methods need to collect Received Signal Strength (RSS) or CSI at different places to construct a database mapping RSS" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "type": "text", + "content": "376" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 81, + 161, + 176 + ], + "blocks": [ + { + "bbox": [ + 75, + 81, + 161, + 176 + ], + "lines": [ + { + "bbox": [ + 75, + 81, + 161, + 176 + ], + "spans": [ + { + "bbox": [ + 75, + 81, + 161, + 176 + ], + "type": "image", + "image_path": "dd3fffaab64c2aa57f33b3095c34259cf6579acc3062073b260c8ec1623d4fbf.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 78, + 178, + 158, + 188 + ], + "lines": [ + { + "bbox": [ + 78, + 178, + 158, + 188 + ], + "spans": [ + { + "bbox": [ + 78, + 178, + 158, + 188 + ], + "type": "text", + "content": "(a) Library (48 rooms)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 184, + 81, + 271, + 177 + ], + "blocks": [ + { + "bbox": [ + 184, + 81, + 271, + 177 + ], + "lines": [ + { + "bbox": [ + 184, + 81, + 271, + 177 + ], + "spans": [ + { + "bbox": [ + 184, + 81, + 271, + 177 + ], + "type": "image", + "image_path": "e49e4eeeefff0ee45a136194aac6af3b509d72117db7264bff2f5e731f512942.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 196, + 178, + 269, + 187 + ], + "lines": [ + { + "bbox": [ + 196, + 178, + 269, + 187 + ], + "spans": [ + { + "bbox": [ + 196, + 178, + 269, + 187 + ], + "type": "text", + "content": "(b) Office (54 rooms)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 
50, + 192, + 294, + 213 + ], + "lines": [ + { + "bbox": [ + 50, + 192, + 294, + 213 + ], + "spans": [ + { + "bbox": [ + 50, + 192, + 294, + 213 + ], + "type": "text", + "content": "Figure 2: The number of LoS APs in each room in a library and an office building." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 228, + 295, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 228, + 295, + 251 + ], + "spans": [ + { + "bbox": [ + 50, + 228, + 295, + 251 + ], + "type": "text", + "content": "(or CSI) with locations, which is a labor-intensive process. Also, their performance may be vulnerable to dynamic environments." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 251, + 295, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 251, + 295, + 447 + ], + "spans": [ + { + "bbox": [ + 50, + 251, + 295, + 447 + ], + "type": "text", + "content": "Model-driven methods induce less labor cost and attract more research studies. Generally, a model-driven method calculates the location by estimating signals' Angle-of-Arrival (AoA) [2, 23, 24, 69], Time-of-Flight (ToF) [70, 81] or both [9, 16, 43]. Most of the existing approaches rely on Line-of-Sight (LoS) signals to work, as Fig. 1(a) illustrates, while a critical problem is often neglected: In the real-world indoor environments, WiFi signals are everywhere, but very few of them are usable for accurate localization. As an example to validate this finding, Fig. 2 plots the statistics of the real deployment of WiFi APs in a library (48 rooms) and an office building (54 rooms). The data shows that in nearly half of all the rooms, there is not even one LoS AP available. The rooms with sufficient LoS signals account for less than " + }, + { + "bbox": [ + 50, + 251, + 295, + 447 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 50, + 251, + 295, + 447 + ], + "type": "text", + "content": " of all the rooms. In other words, the chance for a WiFi device to receive sufficient LoS WiFi AP signals, namely the case for it to be accurately localized by using an existing approach, is less than " + }, + { + "bbox": [ + 50, + 251, + 295, + 447 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 50, + 251, + 295, + 447 + ], + "type": "text", + "content": ". That well explains why the practical performance of using the existing localization approaches is far from being satisfactory." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 448, + 295, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 448, + 295, + 544 + ], + "spans": [ + { + "bbox": [ + 50, + 448, + 295, + 544 + ], + "type": "text", + "content": "A straightforward idea to address the above problem is to increase the number of deployed WiFi APs, until everywhere is covered by at least 3 LoS APs. It isn't practical, however. Taking the library and office building investigated in Fig. 2 as an example, typically there are 50 rooms in a building. Covering every room with 3 APs requires 150 APs to be deployed, which means multiple drawbacks, such as substantial deployment cost of cables (connecting the APs), overly crowded wireless spectrum, and frequent interference and collisions in the wireless communication." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 545, + 295, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 545, + 295, + 656 + ], + "spans": [ + { + "bbox": [ + 50, + 545, + 295, + 656 + ], + "type": "text", + "content": "This paper presents a novel approach called Bifrost, a plug-and-play and cost-effective scheme to significantly enhance the availability of LoS WiFi signals and in turn the localization accuracy. In light of the research progress on leaky wave antenna (LWA) in recent years [21, 22, 42, 47, 48, 76], Bifrost exploits dispersion effect of wireless signals [33]. Deployed in the space covered by WiFi signals, a LWA can receive those signals and then radiate them at different frequencies towards different directions, exhibiting frequency and spatial division multiplexing (FSDM) features, as is reinventing2 WiFi signals." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 328, + 82, + 545, + 292 + ], + "blocks": [ + { + "bbox": [ + 328, + 82, + 545, + 292 + ], + "lines": [ + { + "bbox": [ + 328, + 82, + 545, + 292 + ], + "spans": [ + { + "bbox": [ + 328, + 82, + 545, + 292 + ], + "type": "image", + "image_path": "823419a8c889392fc2b02cd1e4c1b440b94537a0158211bb0cbf7bc569ae4ec1.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 353, + 297, + 520, + 308 + ], + "lines": [ + { + "bbox": [ + 353, + 297, + 520, + 308 + ], + "spans": [ + { + "bbox": [ + 353, + 297, + 520, + 308 + ], + "type": "text", + "content": "Figure 3: The high-level principle of Bifrost." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 336, + 559, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 336, + 559, + 412 + ], + "spans": [ + { + "bbox": [ + 314, + 336, + 559, + 412 + ], + "type": "text", + "content": "Fig. 3 illustrates the high-level principle of Bifrost. To localize a target device, Bifrost uses two LWAs to transform WiFi signals into FSDM signals, so the target device will receive two LoS FSDM signals with a unique pair of frequencies. Since the frequency and the propagation direction of FSDM signals are coupled, the target device can estimate its AoAs to both LWAs by analyzing the received spectrum and then calculate its location." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 413, + 559, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 413, + 559, + 436 + ], + "spans": [ + { + "bbox": [ + 315, + 413, + 559, + 436 + ], + "type": "text", + "content": "Compared with using WiFi APs, using LWA to assist localization offers the following two distinct advantages:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 437, + 559, + 517 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 317, + 437, + 559, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 437, + 559, + 482 + ], + "spans": [ + { + "bbox": [ + 317, + 437, + 559, + 482 + ], + "type": "text", + "content": "1) Cost-effective. The cost of a LWA in Bifrost is 7.41 USD (4.36 USD for the material cost and 3.05 USD for the control module), which is significantly lower than that of a WiFi AP (typically " + }, + { + "bbox": [ + 317, + 437, + 559, + 482 + ], + "type": "inline_equation", + "content": "30 \\sim 100" + }, + { + "bbox": [ + 317, + 437, + 559, + 482 + ], + "type": "text", + "content": " USD [3-6])." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 484, + 559, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 484, + 559, + 517 + ], + "spans": [ + { + "bbox": [ + 316, + 484, + 559, + 517 + ], + "type": "text", + "content": "2) Easy to Use. Deploying a LWA is very convenient. It can operate in a plug-and-play manner without the need for connecting power cables." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 518, + 559, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 518, + 559, + 571 + ], + "spans": [ + { + "bbox": [ + 314, + 518, + 559, + 571 + ], + "type": "text", + "content": "Leveraging these two advantages, Bifrost can be easily implemented in any environment with WiFi coverage, no matter whether the WiFi signals are LoS or not. Bifrost can either work independently, or cooperatively with other conventional WiFi-based localization methods." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 571, + 559, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 571, + 559, + 593 + ], + "spans": [ + { + "bbox": [ + 314, + 571, + 559, + 593 + ], + "type": "text", + "content": "The design of Bifrost tackles several critical challenges, which are summarized as follows:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 596, + 559, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 596, + 559, + 705 + ], + "spans": [ + { + "bbox": [ + 313, + 596, + 559, + 705 + ], + "type": "text", + "content": "Ambiguity between Different LWAs. As Fig. 3 shows, a target device may receive signals from two LWAs, which are reinvented from the same WiFi signal source. Without a special design, it is almost impossible for the target to distinguish one LWA from the other. To overcome this problem, the LWAs in Bifrost are designed to generate orthogonal circular polarized (CP) signals, so that they won't mix up with each other (§3.1). Polarization of LWA signals can be conveniently switched by altering the input port of WiFi signals, without the need for reconstruction or modifications to the LWA's structure." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "spans": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "text", + "content": "Yimiao Sun, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 665, + 295, + 707 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 665, + 295, + 707 + ], + "spans": [ + { + "bbox": [ + 50, + 665, + 295, + 707 + ], + "type": "text", + "content": "1 In Norse mythology, Bifrost is a rainbow bridge that reaches between Midgard (Earth) and Asgard (the realm of gods). \n2 The word \"reinventing\" means that Bifrost makes WiFi signals look different from their original form by using the LWAs. The signal emitted by the LWAs has two new properties, dispersion effect and circular polarization." 
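Editor's note on the extracted text above: the two-LWA localization principle described around Fig. 3 reduces to intersecting two bearing lines, with one AoA estimated per LWA from the received FSDM spectrum. The snippet below is a minimal illustrative sketch, not the authors' implementation: the LWA positions `p1`, `p2` and the AoAs `theta1`, `theta2` are hypothetical inputs, and the sketch only solves the purely geometric 2-D two-line intersection.

```python
import numpy as np

def locate_from_aoas(p1, theta1, p2, theta2):
    """Intersect two bearing lines x = p_i + t_i * u_i, where
    u_i = (cos(theta_i), sin(theta_i)) is the AoA direction seen
    from LWA i. Angles in radians, positions in meters."""
    p1, p2 = np.asarray(p1, float), np.asarray(p2, float)
    u1 = np.array([np.cos(theta1), np.sin(theta1)])
    u2 = np.array([np.cos(theta2), np.sin(theta2)])
    # Solve p1 + t1*u1 = p2 + t2*u2 as a 2x2 linear system in (t1, t2).
    A = np.column_stack([u1, -u2])
    t = np.linalg.solve(A, p2 - p1)  # LinAlgError if bearings are parallel
    return p1 + t[0] * u1

# Made-up example: two LWAs 4 m apart, AoAs of 60 and 120 degrees.
print(locate_from_aoas((0, 0), np.deg2rad(60), (4, 0), np.deg2rad(120)))
# prints approximately [2.0, 3.4641]
```

In the paper's pipeline the AoA pair is first extracted from CSI (§3.3-§3.5); the sketch only shows why two distinguishable LWAs suffice for a 2-D fix, and why unresolved ambiguity between the two LWAs would make this intersection ill-posed.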
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "type": "text", + "content": "377" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 82, + 170, + 165 + ], + "blocks": [ + { + "bbox": [ + 51, + 82, + 170, + 165 + ], + "lines": [ + { + "bbox": [ + 51, + 82, + 170, + 165 + ], + "spans": [ + { + "bbox": [ + 51, + 82, + 170, + 165 + ], + "type": "image", + "image_path": "2109f2078048336a2c4a81a265742db1ac720ab5b8a945f966c7e74d9179e936.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 175, + 161, + 186 + ], + "lines": [ + { + "bbox": [ + 59, + 175, + 161, + 186 + ], + "spans": [ + { + "bbox": [ + 59, + 175, + 161, + 186 + ], + "type": "text", + "content": "(a) Linear polarization (LP)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 173, + 82, + 310, + 167 + ], + "blocks": [ + { + "bbox": [ + 173, + 82, + 310, + 167 + ], + "lines": [ + { + "bbox": [ + 173, + 82, + 310, + 167 + ], + "spans": [ + { + "bbox": [ + 173, + 82, + 310, + 167 + ], + "type": "image", + "image_path": "5bb17e241be2f79ada098e07caca36cc7af2ffba3ddb0c74fca93996c1870e12.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 175, + 300, + 186 + ], + "lines": [ + { + "bbox": [ + 190, + 175, + 300, + 186 + ], + "spans": [ + { + "bbox": [ + 190, + 175, + 300, + 186 + ], + "type": "text", + "content": "(b) Circular polarization (CP)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 314, + 82, + 446, + 171 + ], + "blocks": [ + { + "bbox": [ + 314, + 82, + 446, + 171 + ], + "lines": [ + { + "bbox": [ + 314, + 82, + 446, + 171 + ], + "spans": [ + { + "bbox": [ + 314, + 82, + 446, + 171 + ], + "type": "image", + "image_path": "bbed68b7c7c2a6dffa3e458e459a9cd13deffd3c1a4950f2c36f70d280dc1402.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 194, + 195, + 416, + 206 + ], + "lines": [ + { + "bbox": [ + 194, + 195, + 416, + 206 + ], + "spans": [ + { + "bbox": [ + 194, + 195, + 416, + 206 + ], + "type": "text", + "content": "Figure 4: The properties of polarized electromagnetic waves." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 447, + 82, + 545, + 171 + ], + "blocks": [ + { + "bbox": [ + 331, + 175, + 443, + 187 + ], + "lines": [ + { + "bbox": [ + 331, + 175, + 443, + 187 + ], + "spans": [ + { + "bbox": [ + 331, + 175, + 443, + 187 + ], + "type": "text", + "content": "(c) Elliptical polarization (EP)." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 447, + 82, + 545, + 171 + ], + "lines": [ + { + "bbox": [ + 447, + 82, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 447, + 82, + 545, + 171 + ], + "type": "image", + "image_path": "de065f1018fd63684ca76984649d3bbde8bb74dc9f72d0dd5cb8341546d23646.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 463, + 175, + 547, + 186 + ], + "lines": [ + { + "bbox": [ + 463, + 175, + 547, + 186 + ], + "spans": [ + { + "bbox": [ + 463, + 175, + 547, + 186 + ], + "type": "text", + "content": "(d) CP signal synthesis." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 223, + 296, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 223, + 296, + 332 + ], + "spans": [ + { + "bbox": [ + 50, + 223, + 296, + 332 + ], + "type": "text", + "content": "Signal Extraction from the Interfered Frequency Band. Since FSDM signals radiated by LWAs are transformed from existing WiFi signals, the two types of signals operate within the same frequency band and can be simultaneously received by a target device. Directly using such signals leads to erroneous AoA estimation. To deal with such interference, LWAs in Bifrost work in a duty-cycled manner. The target device is able to detect distinctive variation of the signal amplitude at the frequencies of FSDM signals (§3.3). By analyzing WiFi CSI, the target device can effectively extract the desired FSDM signals from the interfered frequency band." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 334, + 296, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 334, + 296, + 445 + ], + "spans": [ + { + "bbox": [ + 50, + 334, + 296, + 445 + ], + "type": "text", + "content": "Indoor Multipath Effect. The multipath effect in the indoor environment may seriously affect the quality of the received FSDM signals and further affect the localization accuracy. In order to identify FSDM signals propagating along the LoS path, Bifrost operates in two steps. First, we map frequencies of FSDM signals with subcarriers in CSI and cluster adjacent subcarriers to only retain the cluster with the highest energy (§3.4). Second, we take the intersection of two clusters (corresponding to the two orthogonal CP signals), and determine the final frequency by weighting the center frequency of the remaining clustered subcarriers (§3.5)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 60, + 445, + 244, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 445, + 244, + 456 + ], + "spans": [ + { + "bbox": [ + 60, + 445, + 244, + 456 + ], + "type": "text", + "content": "Our contributions can be summarized as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 458, + 295, + 628 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 53, + 458, + 295, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 458, + 295, + 514 + ], + "spans": [ + { + "bbox": [ + 53, + 458, + 295, + 514 + ], + "type": "text", + "content": "1) We tackle a significant problem, namely the limited availability of LoS signals, which is overlooked by the existing works on WiFi-based indoor localization. We reinvent WiFi signals by exploiting the dispersion effect, which represents a new direction of utilizing LWAs." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 516, + 294, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 516, + 294, + 559 + ], + "spans": [ + { + "bbox": [ + 53, + 516, + 294, + 559 + ], + "type": "text", + "content": "2) We address a series of non-trivial challenges, such as signal ambiguity, interference, and multipath effect, etc. The design of Bifrost effectively ensures the quality of signals used for localization." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 563, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 563, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 53, + 563, + 295, + 628 + ], + "type": "text", + "content": "3) We implement Bifrost and evaluate its performance under various settings. The results show that the median localization error of Bifrost is " + }, + { + "bbox": [ + 53, + 563, + 295, + 628 + ], + "type": "inline_equation", + "content": "0.81\\mathrm{m}" + }, + { + "bbox": [ + 53, + 563, + 295, + 628 + ], + "type": "text", + "content": ", which is " + }, + { + "bbox": [ + 53, + 563, + 295, + 628 + ], + "type": "inline_equation", + "content": "52.35\\%" + }, + { + "bbox": [ + 53, + 563, + 295, + 628 + ], + "type": "text", + "content": " less than that of SpotFi, a state-of-the-art approach. SpotFi, when combined with Bifrost to work in the realistic settings, can reduce the localization error by " + }, + { + "bbox": [ + 53, + 563, + 295, + 628 + ], + "type": "inline_equation", + "content": "33.54\\%" + }, + { + "bbox": [ + 53, + 563, + 295, + 628 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 629, + 296, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 629, + 296, + 694 + ], + "spans": [ + { + "bbox": [ + 50, + 629, + 296, + 694 + ], + "type": "text", + "content": "This paper proceeds as follows: §2 introduces background knowledge on the signal polarization and the LWA. Then §3 unfolds the design of Bifrost in both hardware and software. The implementation and evaluation results are presented in §4. We discuss practical issues in §5 and summarize related works in §6. This work is concluded in §7." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 222, + 379, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 222, + 379, + 232 + ], + "spans": [ + { + "bbox": [ + 315, + 222, + 379, + 232 + ], + "type": "text", + "content": "2 PRIMER" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 236, + 559, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 236, + 559, + 258 + ], + "spans": [ + { + "bbox": [ + 314, + 236, + 559, + 258 + ], + "type": "text", + "content": "This section introduces preliminary knowledge of our work: polarization of wireless signals and leaky wave antenna." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 269, + 435, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 269, + 435, + 281 + ], + "spans": [ + { + "bbox": [ + 315, + 269, + 435, + 281 + ], + "type": "text", + "content": "2.1 Signal Polarization" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 283, + 558, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 283, + 558, + 316 + ], + "spans": [ + { + "bbox": [ + 314, + 283, + 558, + 316 + ], + "type": "text", + "content": "Polarization is a fundamental property of wireless signals, including FSDM and WiFi signals investigated in this work. It represents the direction of the signal's electric field, which can be denoted as " + }, + { + "bbox": [ + 314, + 283, + 558, + 316 + ], + "type": "inline_equation", + "content": "\\vec{E}" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 319, + 557, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 319, + 557, + 365 + ], + "spans": [ + { + "bbox": [ + 314, + 319, + 557, + 365 + ], + "type": "text", + "content": "and can be decomposed into the horizontal component " + }, + { + "bbox": [ + 314, + 319, + 557, + 365 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E_x}" + }, + { + "bbox": [ + 314, + 319, + 557, + 365 + ], + "type": "text", + "content": " and the vertical component " + }, + { + "bbox": [ + 314, + 319, + 557, + 365 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E_y}" + }, + { + "bbox": [ + 314, + 319, + 557, + 365 + ], + "type": "text", + "content": ". There will be a phase difference " + }, + { + "bbox": [ + 314, + 319, + 557, + 365 + ], + "type": "inline_equation", + "content": "\\Delta \\phi \\in [0, \\pi)" + }, + { + "bbox": [ + 314, + 319, + 557, + 365 + ], + "type": "text", + "content": " between these two orthogonal components, leading to the following elliptic equation" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 337, + 370, + 533, + 400 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 370, + 533, + 400 + ], + "spans": [ + { + "bbox": [ + 337, + 370, + 533, + 400 + ], + "type": "interline_equation", + "content": "\\left( \\frac{\\overrightarrow{E_x}}{E_{x0}} \\right)^{2} + \\left( \\frac{\\overrightarrow{E_y}}{E_{y0}} \\right)^{2} - \\frac{2 \\overrightarrow{E_x} \\overrightarrow{E_y}}{E_{x0} E_{y0}} \\cos (\\Delta \\phi) = \\sin^{2} (\\Delta \\phi), \\tag{1}", "image_path": "3b9a04b650c01395686d3a9bfdacfae1cbb82a50279fcfc2bd4f9817798c7629.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 399, + 556, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 399, + 556, + 422 + ], + "spans": [ + { + "bbox": [ + 314, + 399, + 556, + 422 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 399, + 556, + 422 + ], + "type": "inline_equation", + "content": "E_{x0}" + }, + { + "bbox": [ + 314, + 399, + 556, + 422 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 399, + 556, + 422 + ], + "type": "inline_equation", + "content": "E_{y0}" + }, + { + "bbox": [ + 314, + 399, + 556, + 422 + ], + "type": "text", + "content": " are amplitudes of " + }, + { + "bbox": [ + 314, + 399, + 556, + 422 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E_x}" + }, + { + "bbox": [ + 314, + 399, + 556, + 422 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 399, + 556,
+ 422 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E_y}" + }, + { + "bbox": [ + 314, + 399, + 556, + 422 + ], + "type": "text", + "content": ". According to the" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 422, + 556, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 422, + 556, + 445 + ], + "spans": [ + { + "bbox": [ + 314, + 422, + 556, + 445 + ], + "type": "text", + "content": "value of " + }, + { + "bbox": [ + 314, + 422, + 556, + 445 + ], + "type": "inline_equation", + "content": "\\Delta \\phi" + }, + { + "bbox": [ + 314, + 422, + 556, + 445 + ], + "type": "text", + "content": ", the polarization of " + }, + { + "bbox": [ + 314, + 422, + 556, + 445 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E}" + }, + { + "bbox": [ + 314, + 422, + 556, + 445 + ], + "type": "text", + "content": " can be divided into the following three categories:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "spans": [ + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "inline_equation", + "content": "\\Delta \\phi = 0" + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "text", + "content": ": we have " + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E_y} = \\pm \\frac{E_{y0}}{E_{x0}}\\overrightarrow{E_x}" + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "text", + "content": ", so the signal is linear polarized (LP), as shown in Fig. 4(a). The polarization direction hinges on " + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "inline_equation", + "content": "\\pm \\frac{E_{y0}}{E_{x0}}" + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "text", + "content": ", the ratio of " + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E_x}" + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E_y}" + }, + { + "bbox": [ + 314, + 448, + 558, + 491 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 492, + 559, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 492, + 559, + 571 + ], + "spans": [ + { + "bbox": [ + 314, + 492, + 559, + 571 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 314, + 492, + 559, + 571 + ], + "type": "inline_equation", + "content": "\\Delta \\phi = \\pm \\frac{\\pi}{2}" + }, + { + "bbox": [ + 314, + 492, + 559, + 571 + ], + "type": "text", + "content": ": we have " + }, + { + "bbox": [ + 314, + 492, + 559, + 571 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E_x}^{2} + \\overrightarrow{E_y}^{2} = \\vec{E}^{2}" + }, + { + "bbox": [ + 314, + 492, + 559, + 571 + ], + "type": "text", + "content": ", and now the signal is circular polarized (CP), as Fig. 4(b) illustrates. Besides, Fig. 4(d) provides another perspective on how the CP signal is decomposed into two LP signals. 
Depending on whether " + }, + { + "bbox": [ + 314, + 492, + 559, + 571 + ], + "type": "inline_equation", + "content": "\\Delta \\phi" + }, + { + "bbox": [ + 314, + 492, + 559, + 571 + ], + "type": "text", + "content": " is positive or negative, the rotation direction of the CP signal is in either left-hand circular polarization (LHCP) or right-hand circular polarization (RHCP), which are orthogonal and won't interfere with each other." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 574, + 559, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 574, + 559, + 608 + ], + "spans": [ + { + "bbox": [ + 314, + 574, + 559, + 608 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 314, + 574, + 559, + 608 + ], + "type": "inline_equation", + "content": "\\Delta \\phi" + }, + { + "bbox": [ + 314, + 574, + 559, + 608 + ], + "type": "text", + "content": " is Other Values: the signal is elliptical polarized (EP), as Fig. 4(c) depicts. Similar to the CP signal, the EP signal also can be divided into left-hand or right-hand." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 609, + 558, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 609, + 558, + 675 + ], + "spans": [ + { + "bbox": [ + 314, + 609, + 558, + 675 + ], + "type": "text", + "content": "Impact of Polarization on the Rx: The polarization of a signal is accorded with that of its transmitting antenna but may change during propagation. To ensure effective reception, it should match the polarization of the receiving antenna, partially at least. Fig. 5 illustrates how polarization mismatch affects the received signal strength (RSS)." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 314, + 675, + 558, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 675, + 558, + 708 + ], + "spans": [ + { + "bbox": [ + 314, + 675, + 558, + 708 + ], + "type": "text", + "content": "For the LP signal and antenna, RSS decreases as the angle of these two polarization directions increases from " + }, + { + "bbox": [ + 314, + 675, + 558, + 708 + ], + "type": "inline_equation", + "content": "0^{\\circ}" + }, + { + "bbox": [ + 314, + 675, + 558, + 708 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 314, + 675, + 558, + 708 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 314, + 675, + 558, + 708 + ], + "type": "text", + "content": ". 
For the CP signal, the signal can be decomposed into two orthogonal LP" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 85, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 85, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 85, + 68 + ], + "type": "text", + "content": "Bifrost" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 363, + 57, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 57, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 363, + 57, + 559, + 69 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "type": "text", + "content": "378" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 82, + 295, + 278 + ], + "blocks": [ + { + "bbox": [ + 52, + 82, + 295, + 278 + ], + "lines": [ + { + "bbox": [ + 52, + 82, + 295, + 278 + ], + "spans": [ + { + "bbox": [ + 52, + 82, + 295, + 278 + ], + "type": "image", + "image_path": "9c50c21f9f2753fbea75feba6489560ab1bb6bd4e497c4e119371cab5de559c0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 288, + 295, + 308 + ], + "lines": [ + { + "bbox": [ + 50, + 288, + 295, + 308 + ], + "spans": [ + { + "bbox": [ + 50, + 288, + 295, + 308 + ], + "type": "text", + "content": "Figure 5: RSS variation according to the polarization of signals and Rx." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 328, + 295, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 328, + 295, + 415 + ], + "spans": [ + { + "bbox": [ + 50, + 328, + 295, + 415 + ], + "type": "text", + "content": "signals. Thus, the LP antenna can only receive the component whose polarization direction is parallel to itself but loses half of the signal energy. Similarly, the CP antenna can only receive half of the LP signal's energy. However, when LHCP antenna is used to receive RHCP signals or vice versa, RSS is theoretically zero because these two polarizations are orthogonal. That is the reason why Bifrost can eliminate the ambiguity of two FSDM signals radiated from different LWAs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 426, + 185, + 438 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 426, + 185, + 438 + ], + "spans": [ + { + "bbox": [ + 51, + 426, + 185, + 438 + ], + "type": "text", + "content": "2.2 Leaky Wave Antenna" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 441, + 296, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 441, + 296, + 507 + ], + "spans": [ + { + "bbox": [ + 50, + 441, + 296, + 507 + ], + "type": "text", + "content": "LWA belongs to the class of traveling-wave antennas, where the propagating wave inside the antenna structure can \"leak\" (i.e., radiate) from the waveguide to the free space, hence the name. It can distinctively couple the leaky wave's frequency and radiation direction to produce a frequency and spatial division multiplexing (FSDM) signal, as shown in Fig. 6. 
Specifically, direction of the" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 511, + 252, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 511, + 252, + 533 + ], + "spans": [ + { + "bbox": [ + 51, + 511, + 252, + 533 + ], + "type": "interline_equation", + "content": "\\text{signal } \\overrightarrow{E_f} \\text{ with frequency } f \\text{ can be determined by [71]:}", "image_path": "3946471648e2f53715aed9d26a903ab60efe071d01f317f4210258f9049122a7.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 128, + 533, + 294, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 533, + 294, + 548 + ], + "spans": [ + { + "bbox": [ + 128, + 533, + 294, + 548 + ], + "type": "interline_equation", + "content": "\\theta (f) = \\arccos \\left( \\frac{\\beta (f)}{k_{0}(f)} \\right) \\tag{2}", "image_path": "1187de196e827f33d1610677a39a09bcda34961935e40152fb7ccba567ea5298.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 554, + 295, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 554, + 295, + 577 + ], + "spans": [ + { + "bbox": [ + 50, + 554, + 295, + 577 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 554, + 295, + 577 + ], + "type": "inline_equation", + "content": "\\beta (f)" + }, + { + "bbox": [ + 50, + 554, + 295, + 577 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 554, + 295, + 577 + ], + "type": "inline_equation", + "content": "k_{0}(f)" + }, + { + "bbox": [ + 50, + 554, + 295, + 577 + ], + "type": "text", + "content": " are the phase constant along the LWA and the propagation constant in the free space w.r.t " + }, + { + "bbox": [ + 50, + 554, + 295, + 577 + ], + "type": "inline_equation", + "content": "E_{f}" + }, + { + "bbox": [ + 50, + 554, + 295, + 577 + ], + "type": "text", + "content": " [52]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "spans": [ + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "type": "text", + "content": "Currently, two main types of LWAs have been extensively studied. 1) The uniform LWA, which employs a metallic waveguide with a slit cut along its length [21, 22, 42, 76], as depicted in Fig. 6(b). The FSDM signal leaked from a uniform LWA can only propagate towards the forward region (i.e., " + }, + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "type": "inline_equation", + "content": "0^{\\circ}" + }, + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "type": "inline_equation", + "content": "90^\\circ" + }, + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "type": "text", + "content": "). 2) The periodic LWA, which is typically designed using a dielectric substrate with a periodic array of metal strips (i.e., slots) [10-13] and similar to an antenna array, as shown in Fig. 6(a). 
The FSDM signal of this type of LWA can propagate towards both forward and backward regions (i.e., " + }, + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "type": "inline_equation", + "content": "0^{\\circ}" + }, + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "type": "inline_equation", + "content": "180^\\circ" + }, + { + "bbox": [ + 50, + 577, + 295, + 686 + ], + "type": "text", + "content": ") [33]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 686, + 293, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 686, + 293, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 686, + 293, + 708 + ], + "type": "text", + "content": "Periodic LWA has been widely studied in recent research due to its versatile slot design and low-cost fabrication using the printed" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 318, + 82, + 558, + 182 + ], + "blocks": [ + { + "bbox": [ + 318, + 82, + 558, + 182 + ], + "lines": [ + { + "bbox": [ + 318, + 82, + 558, + 182 + ], + "spans": [ + { + "bbox": [ + 318, + 82, + 558, + 182 + ], + "type": "image", + "image_path": "36b40804dcad7a772c81b3692c34777cfdd86cb7e5c8e87d5c00d280bc55e7a9.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 341, + 186, + 531, + 197 + ], + "lines": [ + { + "bbox": [ + 341, + 186, + 531, + 197 + ], + "spans": [ + { + "bbox": [ + 341, + 186, + 531, + 197 + ], + "type": "text", + "content": "Figure 6: Typical structures of leaky wave antenna3." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 217, + 559, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 217, + 559, + 250 + ], + "spans": [ + { + "bbox": [ + 314, + 217, + 559, + 250 + ], + "type": "text", + "content": "circuit board (PCB) process. These attributes have made it a popular choice in various applications. Bifrost also employs the periodic structure to produce circular polarized signals." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 259, + 383, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 259, + 383, + 270 + ], + "spans": [ + { + "bbox": [ + 315, + 259, + 383, + 270 + ], + "type": "text", + "content": "3 BIFROST" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 273, + 559, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 273, + 559, + 318 + ], + "spans": [ + { + "bbox": [ + 314, + 273, + 559, + 318 + ], + "type": "text", + "content": "In this section, we first articulate how to design the circular polarized LWA (i.e., CPLWA) to transform the input LP signal into the CP signal with the FSDM feature. Then, we present details of our approach of localization with the CPLWA." 
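To make the frequency-to-direction mapping of Eq. (2) concrete, the sketch below converts a carrier frequency into a beam angle. The dispersion profile beta_demo is a hypothetical placeholder, chosen only so that the 5.17-5.33 GHz band sweeps the 22-44 degree range reported for the fabricated CPLWA; the real beta(f) is fixed by the substrate and slot geometry, not by this formula.

```python
import numpy as np

C = 3e8  # speed of light (m/s)

def beam_direction_deg(f_hz, beta_fn):
    """Eq. (2): theta(f) = arccos(beta(f) / k0(f)), with k0 = 2*pi*f/c."""
    k0 = 2 * np.pi * f_hz / C
    return np.degrees(np.arccos(beta_fn(f_hz) / k0))

def beta_demo(f_hz):
    """Hypothetical dispersion: beta/k0 sweeps cos(44 deg) -> cos(22 deg)
    across 5.17-5.33 GHz, mimicking the 22 deg FoV of the forward beam."""
    frac = (f_hz - 5.17e9) / (5.33e9 - 5.17e9)
    lo, hi = np.cos(np.radians(44)), np.cos(np.radians(22))
    return (lo + frac * (hi - lo)) * 2 * np.pi * f_hz / C

for f in (5.17e9, 5.25e9, 5.33e9):
    print(f"{f / 1e9:.2f} GHz -> {beam_direction_deg(f, beta_demo):.1f} deg")
```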
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 327, + 418, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 327, + 418, + 341 + ], + "spans": [ + { + "bbox": [ + 315, + 327, + 418, + 341 + ], + "type": "text", + "content": "3.1 CPLWA Design" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "spans": [ + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "text", + "content": "Unlike many traditional LWAs [10, 13, 21, 22], Bifrost utilizes " + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "inline_equation", + "content": "\\mathrm{CP^4}" + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "text", + "content": " (i.e., RHCP and LHCP) to distinguish different LWAs and corresponding FSDM signals. We specially design a CPLWA that can generate both LHCP and RHCP signals. As shown in Fig. 7(a), our CPLWA has both vertical and horizontal slots to generate orthogonal LP signals, and further to form the CP signal (the bifurcation is designed for performance optimization). According to Eq. (1), a " + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "inline_equation", + "content": "\\frac{\\pi}{2}" + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "text", + "content": " phase difference between two LP signals is necessary to generate the CP signal, and this is achieved by adjusting the length of the slots. Denoting the guided wavelength at " + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "inline_equation", + "content": "5.25\\mathrm{GHz}" + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "text", + "content": " of the substrate material as " + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "inline_equation", + "content": "\\lambda_{g}" + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "text", + "content": ", the distance between the center of the horizontal and the vertical slots is " + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "inline_equation", + "content": "\\frac{\\lambda_g}{4}" + }, + { + "bbox": [ + 314, + 342, + 559, + 479 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 478, + 559, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 478, + 559, + 587 + ], + "spans": [ + { + "bbox": [ + 314, + 478, + 559, + 587 + ], + "type": "text", + "content": "In the fabrication process of CPLWA, we adopt a two-layer copper-clad substrate structure, as shown in Fig. 7(b). The substrate material is F4BM-2, whose permittivity " + }, + { + "bbox": [ + 314, + 478, + 559, + 587 + ], + "type": "inline_equation", + "content": "\\epsilon = 3.02" + }, + { + "bbox": [ + 314, + 478, + 559, + 587 + ], + "type": "text", + "content": ". The top and bottom layers of the substrate consist of copper and have undergone tin immersion plating to prevent oxidation. The bottom layer of copper functions as the ground, and the shorting vias are incorporated to penetrate the substrate, connecting the top and bottom layers in order to ground the top layer. These shorting vias are periodically arranged on the upper and lower boundaries of the substrate and the patch." 
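As a rough sanity check on the quarter-wavelength slot spacing described above: a minimal sketch, assuming the guided wavelength can be approximated as the free-space wavelength divided by sqrt(epsilon) with the quoted epsilon = 3.02. The true lambda_g depends on the full substrate and slot geometry, so the numbers here are only indicative.

```python
import math

C = 3e8          # speed of light (m/s)
F = 5.25e9       # design frequency (Hz)
EPSILON = 3.02   # F4BM-2 permittivity quoted above

# Crude guided-wavelength estimate; the fabricated CPLWA uses the exact value.
lambda_g = C / (F * math.sqrt(EPSILON))
spacing = lambda_g / 4  # center-to-center distance of horizontal/vertical slots

# A quarter guided wavelength of travel accumulates a pi/2 (90 deg) phase lag,
# exactly the offset needed to turn two orthogonal LP waves into a CP wave.
phase_deg = math.degrees(2 * math.pi * spacing / lambda_g)
print(f"lambda_g ~ {lambda_g * 1e3:.1f} mm, spacing ~ {spacing * 1e3:.1f} mm, "
      f"phase offset = {phase_deg:.0f} deg")
```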
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 587, + 560, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 587, + 560, + 665 + ], + "spans": [ + { + "bbox": [ + 313, + 587, + 560, + 665 + ], + "type": "text", + "content": "The final structure of our proposed CPLWA is depicted in Fig. 7(c), where multiple units are linearly arranged together to enhance the directivity of the FSDM signal, which is similar to the antenna array. Note that a CPLWA is composed of 6 units as an illustration, but 11 units are arranged in practice. This CPLWA features two ports on both ends: one is the feed port that connects to an LP antenna for absorbing the WiFi signal, and the other should connect to a" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "spans": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "text", + "content": "Yimiao Sun, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 314, + 673, + 559, + 698 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 673, + 559, + 698 + ], + "spans": [ + { + "bbox": [ + 314, + 673, + 559, + 698 + ], + "type": "text", + "content": "It is worth noting that the 2D radiation pattern is used here for illustration purposes. In reality, the radiation pattern of the leaky wave with a specific frequency is more like a cone, with a generatrix along the propagation direction of the traveling wave." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 698, + 543, + 708 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 698, + 543, + 708 + ], + "spans": [ + { + "bbox": [ + 315, + 698, + 543, + 708 + ], + "type": "text", + "content": "4Unless otherwise specified, CP signals stand for both RHCP and LHCP signals." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "type": "text", + "content": "379" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 82, + 168, + 163 + ], + "blocks": [ + { + "bbox": [ + 75, + 82, + 168, + 163 + ], + "lines": [ + { + "bbox": [ + 75, + 82, + 168, + 163 + ], + "spans": [ + { + "bbox": [ + 75, + 82, + 168, + 163 + ], + "type": "image", + "image_path": "7d0df6daa0e60f943e48d51af1e118545c15659453e80866a132fc5dabc9f7dc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 78, + 165, + 151, + 174 + ], + "lines": [ + { + "bbox": [ + 78, + 165, + 151, + 174 + ], + "spans": [ + { + "bbox": [ + 78, + 165, + 151, + 174 + ], + "type": "text", + "content": "(a) Unit of the LWA." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 175, + 86, + 270, + 158 + ], + "blocks": [ + { + "bbox": [ + 175, + 86, + 270, + 158 + ], + "lines": [ + { + "bbox": [ + 175, + 86, + 270, + 158 + ], + "spans": [ + { + "bbox": [ + 175, + 86, + 270, + 158 + ], + "type": "image", + "image_path": "a4811d8b72e0b779962645388f118c8dadfe40b3817213814d85446162eba8f8.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 165, + 264, + 175 + ], + "lines": [ + { + "bbox": [ + 187, + 165, + 264, + 175 + ], + "spans": [ + { + "bbox": [ + 187, + 165, + 264, + 175 + ], + "type": "text", + "content": "(b) Layered structure." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 54, + 180, + 293, + 231 + ], + "blocks": [ + { + "bbox": [ + 54, + 180, + 293, + 231 + ], + "lines": [ + { + "bbox": [ + 54, + 180, + 293, + 231 + ], + "spans": [ + { + "bbox": [ + 54, + 180, + 293, + 231 + ], + "type": "image", + "image_path": "77325214c9a5a697d9c27756c1cfc8c1669913385909b07abdad9bc4bfa1f232.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 129, + 234, + 202, + 244 + ], + "lines": [ + { + "bbox": [ + 129, + 234, + 202, + 244 + ], + "spans": [ + { + "bbox": [ + 129, + 234, + 202, + 244 + ], + "type": "text", + "content": "(c) Complete design." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 80, + 248, + 264, + 259 + ], + "lines": [ + { + "bbox": [ + 80, + 248, + 264, + 259 + ], + "spans": [ + { + "bbox": [ + 80, + 248, + 264, + 259 + ], + "type": "text", + "content": "Figure 7: General view of CPLWA used in Bifrost." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 279, + 294, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 279, + 294, + 334 + ], + "spans": [ + { + "bbox": [ + 50, + 279, + 294, + 334 + ], + "type": "text", + "content": "matched " + }, + { + "bbox": [ + 50, + 279, + 294, + 334 + ], + "type": "inline_equation", + "content": "50\\Omega" + }, + { + "bbox": [ + 50, + 279, + 294, + 334 + ], + "type": "text", + "content": " load. By changing the signal feed port, polarization of the FSDM signal can switch between LHCP and RHCP. If the input signal has gone through all slots and reached the other end, yet still has energy remaining, the matched load will absorb the excess signal." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 335, + 294, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 335, + 294, + 378 + ], + "spans": [ + { + "bbox": [ + 50, + 335, + 294, + 378 + ], + "type": "text", + "content": "The CPLWA used in Bifrost is specially designed at 5.17GHz-5.33GHz WiFi band, while this structure and design methodology are universally applicable for other frequencies and bandwidths by properly modifying the relevant parameters." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "spans": [ + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "text", + "content": "Now we conduct a quick validation to show the key performance of the proposed CPLWA using ANSYS HFSS. 
Firstly, the direction of the FSDM signal w.r.t different frequencies is depicted in Fig. 8(a). There is a total " + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "inline_equation", + "content": "22^{\\circ}" + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "text", + "content": " field of view (FoV) across the operating frequency band (5.17GHz-5.33GHz). Note that when the LP signal is fed into the right port or left port, the RHCP or LHCP signal will be radiated from " + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "inline_equation", + "content": "22^{\\circ}" + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "inline_equation", + "content": "44^{\\circ}" + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "inline_equation", + "content": "136^{\\circ}" + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "inline_equation", + "content": "158^{\\circ}" + }, + { + "bbox": [ + 50, + 379, + 295, + 510 + ], + "type": "text", + "content": ", respectively. Fig. 8(b) shows the energy distribution of signals at five different frequencies. It is evident that the energy of the leaky signal concentrates on the correct direction, and their realized gains are all above 11.5dB. Therefore, the direction can be easily identified by examining the energy distribution of signals." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 510, + 295, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 510, + 295, + 532 + ], + "spans": [ + { + "bbox": [ + 50, + 510, + 295, + 532 + ], + "type": "text", + "content": "With the proposed CPLWA, we will proceed with elaborating on the core localization algorithm in Bifrost." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 542, + 200, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 542, + 200, + 553 + ], + "spans": [ + { + "bbox": [ + 51, + 542, + 200, + 553 + ], + "type": "text", + "content": "3.2 Basic Localization Model" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "S_{l}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "S_{r}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": " respectively denote LHCP and RHCP signals that propagate from corresponding LWAs to the target via the LoS paths. 
The frequencies of these two signals, " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "f_{l}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "f_{r}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": ", are what we desire for calculating the location. Recall that " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "S_{l}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "S_{r}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": " are featured in frequency and space division multiplexing (FSDM) and orthogonal " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "\\mathrm{CP}^5" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": ", so these two signals won't interfere with each other. As a result, the target can estimate its relative direction to both LWAs based on the received spectrum and the radiation pattern of the two LWAs. Further, given locations of two LWAs, " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "L_{r}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "x_{r}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "y_{r}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "z_{r}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": ") of the RHCP LWA and " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "L_{l}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "x_{l}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "y_{l}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "inline_equation", + "content": "z_{l}" + }, + { + "bbox": [ + 50, + 556, + 295, + 689 + ], + "type": "text", + "content": ") of the LHCP LWA, the target can output its absolute location. 
In detail, as we mentioned in §2, the radiation pattern of the LWA is a conical" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 317, + 86, + 441, + 196 + ], + "blocks": [ + { + "bbox": [ + 317, + 86, + 441, + 196 + ], + "lines": [ + { + "bbox": [ + 317, + 86, + 441, + 196 + ], + "spans": [ + { + "bbox": [ + 317, + 86, + 441, + 196 + ], + "type": "image", + "image_path": "b24fbb91f95a7b3b3fd9b8a04a914b391287c260958fa8809ac3f9870b57a917.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 329, + 200, + 419, + 209 + ], + "lines": [ + { + "bbox": [ + 329, + 200, + 419, + 209 + ], + "spans": [ + { + "bbox": [ + 329, + 200, + 419, + 209 + ], + "type": "text", + "content": "(a) Main beam direction." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 371, + 217, + 504, + 227 + ], + "lines": [ + { + "bbox": [ + 371, + 217, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 371, + 217, + 504, + 227 + ], + "type": "text", + "content": "Figure 8: Key results of the CPLWA." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 447, + 86, + 559, + 196 + ], + "blocks": [ + { + "bbox": [ + 447, + 86, + 559, + 196 + ], + "lines": [ + { + "bbox": [ + 447, + 86, + 559, + 196 + ], + "spans": [ + { + "bbox": [ + 447, + 86, + 559, + 196 + ], + "type": "image", + "image_path": "6fb82dec0dccfa3d8c1ed30bcc9878629f29b2090b1d4daa1b5f62c3ee02be85.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 468, + 200, + 529, + 209 + ], + "lines": [ + { + "bbox": [ + 468, + 200, + 529, + 209 + ], + "spans": [ + { + "bbox": [ + 468, + 200, + 529, + 209 + ], + "type": "text", + "content": "(b) Realized gain." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 245, + 558, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 245, + 558, + 298 + ], + "spans": [ + { + "bbox": [ + 314, + 245, + 558, + 298 + ], + "type": "text", + "content": "surface at a specific frequency. Therefore, the location " + }, + { + "bbox": [ + 314, + 245, + 558, + 298 + ], + "type": "inline_equation", + "content": "L_{t}" + }, + { + "bbox": [ + 314, + 245, + 558, + 298 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 314, + 245, + 558, + 298 + ], + "type": "inline_equation", + "content": "x_{t}, y_{t}" + }, + { + "bbox": [ + 314, + 245, + 558, + 298 + ], + "type": "text", + "content": ") of the target device is the intersection point of the two conical surfaces and the horizontal plane of its height. 
By combining these conditions, " + }, + { + "bbox": [ + 314, + 245, + 558, + 298 + ], + "type": "inline_equation", + "content": "L_{t}" + }, + { + "bbox": [ + 314, + 245, + 558, + 298 + ], + "type": "text", + "content": " can be estimated by solving the following equation set:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 388, + 299, + 556, + 327 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 299, + 556, + 327 + ], + "spans": [ + { + "bbox": [ + 388, + 299, + 556, + 327 + ], + "type": "interline_equation", + "content": "L _ {t} = \\left(x _ {t}, y _ {t}\\right) = \\begin{cases} F \\left(L _ {r}, f _ {r}\\right), \\\\ F \\left(L _ {l}, f _ {l}\\right), \\end{cases} \\tag {3}", + "image_path": "96f8834ba4b59d1758ae3218cacfd6e013ddbc0795a8006f7b66b552dc15065a.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "spans": [ + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "text", + "content": " is the target's height; functions " + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "inline_equation", + "content": "F(L_{r}, f_{r})" + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "inline_equation", + "content": "F(L_{l}, f_{l})" + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "text", + "content": " are mathematical equations of conical surfaces with the location of LWAs as the vertex. These two equations indicate the propagation directions of RHCP and LHCP signals at frequencies " + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "inline_equation", + "content": "f_{r}" + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "inline_equation", + "content": "f_{l}" + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "text", + "content": ", respectively. 
Taking the RHCP signal as an example, " + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "inline_equation", + "content": "F = F(L_{r}, f_{r})" + }, + { + "bbox": [ + 314, + 337, + 559, + 403 + ], + "type": "text", + "content": " can be formulated as" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 363, + 407, + 561, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 407, + 561, + 430 + ], + "spans": [ + { + "bbox": [ + 363, + 407, + 561, + 430 + ], + "type": "interline_equation", + "content": "F = (x - x _ {r}) ^ {2} - \\frac {(y - y _ {r}) ^ {2}}{a ^ {2}} - \\frac {(z - z _ {r}) ^ {2}}{a ^ {2}}, \\tag {4}", + "image_path": "20417a2bd2ea76cbcf6c5dd2a19149fcaedc99406da14a30b7f2041a0516dc57.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 431, + 398, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 431, + 398, + 442 + ], + "spans": [ + { + "bbox": [ + 314, + 431, + 398, + 442 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 431, + 398, + 442 + ], + "type": "inline_equation", + "content": "a = \\cot [\\theta (f_r)]" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 443, + 559, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 443, + 559, + 541 + ], + "spans": [ + { + "bbox": [ + 313, + 443, + 559, + 541 + ], + "type": "text", + "content": "However, there are two other types of signals impacting the localization accuracy when Bifrost functions: 1) LP WiFi signal that is emitted by the WiFi AP, and then received by the target. This signal establishes data communication between the target and the AP and propagates in both the LoS path and multipath. It is also the input signal of LWAs, which will be transformed into FSDM signals by the LWAs. 2) CP multipath signal that propagates from LWAs to the target after reflection, resulting in undesired noisy signals at the target." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 542, + 560, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 542, + 560, + 586 + ], + "spans": [ + { + "bbox": [ + 314, + 542, + 560, + 586 + ], + "type": "text", + "content": "Thus, we should first identify the frequency of the FSDM signal from the LP WiFi signal (discussed in §3.3) and then filter out the CP multipath signal as much as possible (discussed in §3.4 and §3.5), to accurately estimate frequencies, " + }, + { + "bbox": [ + 314, + 542, + 560, + 586 + ], + "type": "inline_equation", + "content": "f_{l}" + }, + { + "bbox": [ + 314, + 542, + 560, + 586 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 542, + 560, + 586 + ], + "type": "inline_equation", + "content": "f_{r}" + }, + { + "bbox": [ + 314, + 542, + 560, + 586 + ], + "type": "text", + "content": ", and the target's location." 
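The localization step of Eqs. (3)-(4) amounts to finding the (x, y) point at height z_t where both cone equations vanish. Below is a minimal numerical sketch, assuming the beam angles theta_r and theta_l have already been recovered from f_r and f_l via Eq. (2); the deployment geometry, the initial guess, and the generic least-squares root finder are all illustrative stand-ins, not the authors' implementation.

```python
import numpy as np
from scipy.optimize import least_squares

def cone_F(p, L, theta_deg, z_t):
    """Eq. (4): F = (x-xr)^2 - ((y-yr)^2 + (z-zr)^2) / a^2, a = cot(theta)."""
    x, y = p
    xr, yr, zr = L
    a = 1.0 / np.tan(np.radians(theta_deg))
    return (x - xr) ** 2 - ((y - yr) ** 2 + (z_t - zr) ** 2) / a ** 2

def locate(L_r, theta_r, L_l, theta_l, z_t, guess=(1.0, 1.0)):
    """Eq. (3): intersect both cones with the plane z = z_t.

    The initial guess selects among mirror-image intersection points."""
    residuals = lambda p: [cone_F(p, L_r, theta_r, z_t),
                           cone_F(p, L_l, theta_l, z_t)]
    return least_squares(residuals, guess).x

# Hypothetical deployment: two ceiling-mounted LWAs 1 m apart at z = 2 m,
# target at height 0.8 m; the angles are stand-ins for Eq. (2) outputs.
x_t, y_t = locate(L_r=(0.0, 0.0, 2.0), theta_r=35.0,
                  L_l=(1.0, 0.0, 2.0), theta_l=40.0, z_t=0.8)
print(f"estimated target location: ({x_t:.2f}, {y_t:.2f})")
```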
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 594, + 527, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 594, + 527, + 608 + ], + "spans": [ + { + "bbox": [ + 315, + 594, + 527, + 608 + ], + "type": "text", + "content": "3.3 Identifying Frequencies of CP signals" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 609, + 559, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 609, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 609, + 559, + 708 + ], + "type": "text", + "content": "When Bifrost functions, LWAs need the LP WiFi signal as input, and the target device may also need it for data communication with the WiFi AP. Nevertheless, the LP signal may interfere with the reception of the CP signal, because CP antennas at the target device can receive the LP signal (as already explained in §2). To cancel this interference, we control LWAs to be periodically turned on and off, working in a duty-cycled manner. This design allows the target to identify frequencies that correspond to the CP signal by analyzing the variation in its received spectrum, and at the same" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "type": "text", + "content": "Bifrost" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 698, + 238, + 708 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 698, + 238, + 708 + ], + "spans": [ + { + "bbox": [ + 51, + 698, + 238, + 708 + ], + "type": "text", + "content": "5 Unless stated otherwise, CP signals have the property of FSDM." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 297, + 731, + 315, + 740 + ], + "type": "text", + "content": "380" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 78, + 294, + 149 + ], + "blocks": [ + { + "bbox": [ + 53, + 78, + 294, + 149 + ], + "lines": [ + { + "bbox": [ + 53, + 78, + 294, + 149 + ], + "spans": [ + { + "bbox": [ + 53, + 78, + 294, + 149 + ], + "type": "image", + "image_path": "279daa2a6c56e4e94198173bb12843489cab9266b1f6d6bb7f2e5b98f6a29900.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 107, + 152, + 237, + 163 + ], + "lines": [ + { + "bbox": [ + 107, + 152, + 237, + 163 + ], + "spans": [ + { + "bbox": [ + 107, + 152, + 237, + 163 + ], + "type": "text", + "content": "(a) Normalized amplitude variation." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 53, + 175, + 294, + 236 + ], + "blocks": [ + { + "bbox": [ + 53, + 175, + 294, + 236 + ], + "lines": [ + { + "bbox": [ + 53, + 175, + 294, + 236 + ], + "spans": [ + { + "bbox": [ + 53, + 175, + 294, + 236 + ], + "type": "image", + "image_path": "c4cebafb43021fd59b73e2a54abe86e3e358b8ba47278543a6279387ef8f17fb.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 114, + 239, + 231, + 249 + ], + "lines": [ + { + "bbox": [ + 114, + 239, + 231, + 249 + ], + "spans": [ + { + "bbox": [ + 114, + 239, + 231, + 249 + ], + "type": "text", + "content": "(b) Normalized phase variation." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 105, + 254, + 238, + 264 + ], + "lines": [ + { + "bbox": [ + 105, + 254, + 238, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 254, + 238, + 264 + ], + "type": "text", + "content": "Figure 9: Standardized CSI variation." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 279, + 296, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 279, + 296, + 388 + ], + "spans": [ + { + "bbox": [ + 50, + 279, + 296, + 388 + ], + "type": "text", + "content": "time, saves energy of LWAs. Specifically, we exploit WiFi CSI [27, 28, 75, 78] to explore fine-grained information on the amplitude and phase of the subcarriers. Fig. 9 illustrates the result of a proof-of-concept experiment, where subcarriers corresponding to LoS and multipath signals are distinguishable in the normalized amplitude of CSI. However, the variation in phase is not obvious, making it challenging to discern useful subcarriers because they are often obscured by random errors and noise. According to this result, we can only extract frequencies of the CP signal based on the amplitude variation in CSI." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "spans": [ + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "type": "text", + "content": "As a LWA turns on or off, we denote the corresponding CSI as " + }, + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "type": "inline_equation", + "content": "H_{on}(f_k)" + }, + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "type": "inline_equation", + "content": "H_{off}(f_k)" + }, + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "type": "text", + "content": " for the " + }, + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "type": "text", + "content": "-th subcarrier with center frequency " + }, + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "type": "inline_equation", + "content": "f_k" + }, + { + "bbox": [ + 51, + 388, + 296, + 444 + ], + "type": "text", + "content": ", respectively. 
The former is jointly influenced by CP and LP signals, while the latter is determined by the LP signal only, leading to the following relationship:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 449, + 294, + 482 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 294, + 482 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 294, + 482 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| H _ {o n} \\left(f _ {k}\\right) \\right\\| = \\left\\| H ^ {C P} \\left(f _ {k}\\right) + H ^ {L P} \\left(f _ {k}\\right) \\right\\|, \\\\ \\left\\| H _ {o f f} \\left(f _ {k}\\right) \\right\\| = \\left\\| H ^ {L P} \\left(f _ {k}\\right) \\right\\|, \\tag {5} \\\\ \\end{array}", + "image_path": "790477f0bf5b12003888f702c24b80b8190c976c83a56c38334149a394dda126.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 487, + 295, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 487, + 295, + 531 + ], + "spans": [ + { + "bbox": [ + 50, + 487, + 295, + 531 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 487, + 295, + 531 + ], + "type": "inline_equation", + "content": "\\| H^{CP}(f_k)\\|" + }, + { + "bbox": [ + 50, + 487, + 295, + 531 + ], + "type": "text", + "content": " is the amplitude of subcarriers corresponding to the CP signal, and " + }, + { + "bbox": [ + 50, + 487, + 295, + 531 + ], + "type": "inline_equation", + "content": "\\| H^{LP}(f_k)\\|" + }, + { + "bbox": [ + 50, + 487, + 295, + 531 + ], + "type": "text", + "content": " is that of the LP signal. Based on these two values, we can quantify the variation of CSI caused by the CP signal:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 531, + 295, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 531, + 295, + 582 + ], + "spans": [ + { + "bbox": [ + 83, + 531, + 295, + 582 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\Delta H (f _ {k}) \\| = \\| H ^ {C P} (f _ {k}) \\| \\\\ = \\| H ^ {C P} \\left(f _ {k}\\right) + H ^ {L P} \\left(f _ {k}\\right) \\| - \\| H ^ {L P} \\left(f _ {k}\\right) \\| \\tag {6} \\\\ = \\| H _ {o n} \\left(f _ {k}\\right) \\| - \\| H _ {o f f} \\left(f _ {k}\\right) \\| \\\\ \\end{array}", + "image_path": "f242b53b6c4f836190beb8603fe576dde41c2a973fa828c47af862af1cdb72d7.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "type": "text", + "content": "In order to accurately analyze this variation and mitigate the effect of occasional outliers and noise, a Z-Score normalization procedure is performed on " + }, + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "type": "inline_equation", + "content": "\\Delta H(f_{k})" + }, + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "type": "text", + "content": ". We execute a preliminary screening to quickly filter out the subcarriers that are less likely to correspond to the frequencies of the CP signals. 
A percentage threshold " + }, + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "type": "inline_equation", + "content": "\\varepsilon \\in (0, 1)" + }, + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "type": "text", + "content": " is set to select subcarriers with a larger value of " + }, + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "type": "inline_equation", + "content": "\\| \\Delta H (f _ {k}) \\|" + }, + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "type": "text", + "content": ", indicating that these subcarriers undergo significant changes and are more likely to be affected by the CP signal. The value of " + }, + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 50, + 587, + 296, + 708 + ], + "type": "text", + "content": " is chosen empirically based on the degree of multipath. Fig. 10(a) shows a high-level overview of the selected subcarriers, where LHCP and RHCP signals are highlighted in red and blue," + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 317, + 83, + 561, + 201 + ], + "blocks": [ + { + "bbox": [ + 317, + 83, + 561, + 201 + ], + "lines": [ + { + "bbox": [ + 317, + 83, + 561, + 201 + ], + "spans": [ + { + "bbox": [ + 317, + 83, + 561, + 201 + ], + "type": "image", + "image_path": "bc61a0518220150b607c1d66cc7dc6947f380c2b1009e36ae95bd6ebef2eccde.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 367, + 208, + 507, + 219 + ], + "lines": [ + { + "bbox": [ + 367, + 208, + 507, + 219 + ], + "spans": [ + { + "bbox": [ + 367, + 208, + 507, + 219 + ], + "type": "text", + "content": "(a) Selecting frequencies of CP signals." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 317, + 232, + 561, + 310 + ], + "blocks": [ + { + "bbox": [ + 317, + 232, + 561, + 310 + ], + "lines": [ + { + "bbox": [ + 317, + 232, + 561, + 310 + ], + "spans": [ + { + "bbox": [ + 317, + 232, + 561, + 310 + ], + "type": "image", + "image_path": "b8947759e9bead7e2b64a1301febbf053eed3a0fa8aef46ad73d69ef35fc88f1.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 374, + 316, + 500, + 327 + ], + "lines": [ + { + "bbox": [ + 374, + 316, + 500, + 327 + ], + "spans": [ + { + "bbox": [ + 374, + 316, + 500, + 327 + ], + "type": "text", + "content": "(b) Filtering out multipath signals." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 345, + 345, + 410, + 418 + ], + "blocks": [ + { + "bbox": [ + 345, + 345, + 410, + 418 + ], + "lines": [ + { + "bbox": [ + 345, + 345, + 410, + 418 + ], + "spans": [ + { + "bbox": [ + 345, + 345, + 410, + 418 + ], + "type": "image", + "image_path": "c0ca63a466b48a65700cdca67ed5ce4af8d69cd93ba98cefd9921e9d28cfe97c.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 337, + 425, + 414, + 436 + ], + "lines": [ + { + "bbox": [ + 337, + 425, + 414, + 436 + ], + "spans": [ + { + "bbox": [ + 337, + 425, + 414, + 436 + ], + "type": "text", + "content": "(c) Align subcarriers." 
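A compact sketch of the screening just described, combining Eq. (6), Z-score normalization, and the percentage threshold epsilon. The array names and the clean on/off CSI snapshots are our own simplification; real traces first need the duty-cycle segmentation described in §3.3.

```python
import numpy as np

def select_cp_subcarriers(h_on, h_off, eps=0.05):
    """Return indices of subcarriers most likely carrying the CP signal.

    h_on / h_off: per-subcarrier CSI amplitudes with the LWA on / off.
    eps: percentage threshold in (0, 1), the fraction of subcarriers kept.
    """
    delta = np.abs(h_on) - np.abs(h_off)       # Eq. (6): ||H_on|| - ||H_off||
    z = (delta - delta.mean()) / delta.std()   # Z-score normalization
    k = max(1, int(round(eps * len(z))))       # keep the top-eps fraction
    return np.sort(np.argsort(z)[-k:])

# Toy trace: 242 subcarriers with CP energy injected around index 100.
rng = np.random.default_rng(0)
h_off = 1.0 + 0.05 * rng.standard_normal(242)
h_on = h_off.copy()
h_on[100:110] += 0.5
print(select_cp_subcarriers(h_on, h_off, eps=0.05))  # mostly indices 100-109
```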
+ } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 314, + 445, + 559, + 467 + ], + "lines": [ + { + "bbox": [ + 314, + 445, + 559, + 467 + ], + "spans": [ + { + "bbox": [ + 314, + 445, + 559, + 467 + ], + "type": "text", + "content": "Figure 10: Workflow of selecting correct frequencies (LHCP and RHCP are distinguished by red and blue colors)." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 443, + 344, + 531, + 412 + ], + "blocks": [ + { + "bbox": [ + 443, + 344, + 531, + 412 + ], + "lines": [ + { + "bbox": [ + 443, + 344, + 531, + 412 + ], + "spans": [ + { + "bbox": [ + 443, + 344, + 531, + 412 + ], + "type": "image", + "image_path": "75fa5e29537f55972cfe2ab54947c4583cfc8202904c13b1ea132fa3d329ee66.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 438, + 425, + 529, + 436 + ], + "lines": [ + { + "bbox": [ + 438, + 425, + 529, + 436 + ], + "spans": [ + { + "bbox": [ + 438, + 425, + 529, + 436 + ], + "type": "text", + "content": "(d) Estimate frequencies." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 485, + 558, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 485, + 558, + 507 + ], + "spans": [ + { + "bbox": [ + 314, + 485, + 558, + 507 + ], + "type": "text", + "content": "respectively. In subsequent stages, we exclusively focus on these selected subcarriers." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 517, + 508, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 517, + 508, + 530 + ], + "spans": [ + { + "bbox": [ + 315, + 517, + 508, + 530 + ], + "type": "text", + "content": "3.4 Filtering out the Multipath Signal" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 531, + 561, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 531, + 561, + 674 + ], + "spans": [ + { + "bbox": [ + 313, + 531, + 561, + 674 + ], + "type": "text", + "content": "As shown in Fig. 9(a), even though we have identified the frequencies of the CP signal from the WiFi signal, there still exists the multipath signal, resulting in undesired variation in " + }, + { + "bbox": [ + 313, + 531, + 561, + 674 + ], + "type": "inline_equation", + "content": "\\Delta H" + }, + { + "bbox": [ + 313, + 531, + 561, + 674 + ], + "type": "text", + "content": ". Note that the multipath signal is mainly introduced by reflection of the CP FSDM signal. We find that subcarriers corresponding to the multipath signal can be divided into two categories: 1) Sparsely clustered subcarriers " + }, + { + "bbox": [ + 313, + 531, + 561, + 674 + ], + "type": "inline_equation", + "content": "C_s" + }, + { + "bbox": [ + 313, + 531, + 561, + 674 + ], + "type": "text", + "content": ": FSDM signal with different frequencies and propagation directions may go through reflection at many places, but only a few of those signals reach the target with inconsecutive frequencies, resulting in many sparse clusters of subcarriers6. 2) Compactly clustered subcarriers " + }, + { + "bbox": [ + 313, + 531, + 561, + 674 + ], + "type": "inline_equation", + "content": "C_c" + }, + { + "bbox": [ + 313, + 531, + 561, + 674 + ], + "type": "text", + "content": ": There are some FSDM signals with frequencies close to that of the LoS signal. 
Those FSDM signals are reflected just near the target device, which results in a" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "spans": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "text", + "content": "Yimiao Sun, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 314, + 681, + 560, + 708 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 681, + 560, + 708 + ], + "spans": [ + { + "bbox": [ + 314, + 681, + 560, + 708 + ], + "type": "text", + "content": "6 The polarization of the signals may flip after reflection, and we deal with it as the multipath signal in the frequency domain. Thus, this flip doesn't affect the function of our algorithm." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 298, + 731, + 313, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 731, + 313, + 740 + ], + "spans": [ + { + "bbox": [ + 298, + 731, + 313, + 740 + ], + "type": "text", + "content": "381" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 106 + ], + "type": "text", + "content": "compact and wide cluster of subcarriers influenced by multipath and LoS signals." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 106, + 294, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 106, + 294, + 151 + ], + "spans": [ + { + "bbox": [ + 50, + 106, + 294, + 151 + ], + "type": "text", + "content": "Here we first try to filter out " + }, + { + "bbox": [ + 50, + 106, + 294, + 151 + ], + "type": "inline_equation", + "content": "C_s" + }, + { + "bbox": [ + 50, + 106, + 294, + 151 + ], + "type": "text", + "content": ". To do so, all the varied subcarriers are clustered, as Fig. 10(b) illustrates. 
Then, the following integral function will be calculated for every cluster to find the one most likely to correspond to the LoS signal," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 122, + 152, + 293, + 185 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 152, + 293, + 185 + ], + "spans": [ + { + "bbox": [ + 122, + 152, + 293, + 185 + ], + "type": "interline_equation", + "content": "C ^ {i} = \\int _ {f _ {k _ {\\min}} ^ {i}} ^ {f _ {k _ {\\max}} ^ {i}} \\| \\Delta H (f _ {k} ^ {i}) \\| d f _ {k} \\tag {7}", + "image_path": "3f5c13107fec7b40dfb8852b2b1b4d06b84994c45e0842a9735a85ba38e37399.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "spans": [ + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "inline_equation", + "content": "f_{k_{\\min}}^{i}" + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "inline_equation", + "content": "f_{k_{\\max}}^{i}" + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "text", + "content": " are the minimum and maximum frequencies of the " + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "text", + "content": "-th cluster, respectively. The value of " + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "inline_equation", + "content": "C^i" + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "text", + "content": " can be regarded as the area formed by the curve of " + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "inline_equation", + "content": "\\| \\Delta H(f_k^i)\\|" + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "text", + "content": " and the two frequencies " + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "inline_equation", + "content": "f_{k_{\\min}}^{i},f_{k_{\\max}}^{i}" + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "text", + "content": ". The wider the bandwidth and higher the amplitude of a cluster are, the greater the value of its " + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "inline_equation", + "content": "C^i" + }, + { + "bbox": [ + 50, + 185, + 294, + 248 + ], + "type": "text", + "content": " is." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "spans": [ + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "type": "text", + "content": "After that, we only retain the cluster that bears the highest " + }, + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "type": "inline_equation", + "content": "C^i" + }, + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "type": "text", + "content": ", which is most likely to be " + }, + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "type": "inline_equation", + "content": "C_c" + }, + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "type": "text", + "content": " and contains subcarriers corresponding to the LoS signal. 
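In discrete form, Eq. (7) is just the area under ||dH|| across a cluster's frequency span. A small sketch, assuming the clusters of selected subcarriers are already available as index arrays (how they are clustered is orthogonal to this scoring step):

```python
import numpy as np

def cluster_score(delta_h, freqs, cluster):
    """Discrete Eq. (7): trapezoidal area of ||dH|| over one cluster."""
    idx = np.sort(np.asarray(cluster))
    return np.trapz(np.abs(delta_h[idx]), freqs[idx])

def retain_los_cluster(delta_h, freqs, clusters):
    """Keep only the cluster with the highest C^i (widest and strongest)."""
    scores = [cluster_score(delta_h, freqs, c) for c in clusters]
    return clusters[int(np.argmax(scores))]

# Toy example: the wide, strong middle cluster wins over two sparse ones.
freqs = np.linspace(5.17e9, 5.33e9, 242)
delta_h = np.zeros(242)
delta_h[30:33] = 0.8      # sparse multipath cluster
delta_h[100:120] = 1.0    # wide LoS-dominated cluster
delta_h[200:202] = 0.9    # sparse multipath cluster
clusters = [np.arange(30, 33), np.arange(100, 120), np.arange(200, 202)]
print(retain_los_cluster(delta_h, freqs, clusters))
```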
However, as we mentioned before, some subcarriers in " + }, + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "type": "inline_equation", + "content": "C_c" + }, + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "type": "text", + "content": " also correspond to the undesired multipath signal. Next, we are going to purify " + }, + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "type": "inline_equation", + "content": "C_c" + }, + { + "bbox": [ + 50, + 249, + 296, + 316 + ], + "type": "text", + "content": " by narrowing down its frequency range as much as possible." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 323, + 293, + 343 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 323, + 293, + 343 + ], + "spans": [ + { + "bbox": [ + 51, + 323, + 293, + 343 + ], + "type": "text", + "content": "3.5 Purifying the LoS Signal for Localization" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 343, + 293, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 343, + 293, + 352 + ], + "spans": [ + { + "bbox": [ + 51, + 343, + 293, + 352 + ], + "type": "text", + "content": "Denote the frequency range of " + }, + { + "bbox": [ + 51, + 343, + 293, + 352 + ], + "type": "inline_equation", + "content": "C_c" + }, + { + "bbox": [ + 51, + 343, + 293, + 352 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 51, + 343, + 293, + 352 + ], + "type": "inline_equation", + "content": "[k_{\\min}^{r}, k_{\\max}^{r}]" + }, + { + "bbox": [ + 51, + 343, + 293, + 352 + ], + "type": "text", + "content": " for RHCP signals" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "spans": [ + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "[k_{\\min}^{l}, k_{\\max}^{l}]" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": " for LHCP signals. In both of the two ranges, we are going to find the subcarrier with the largest " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "\\| \\Delta H (f _ {k}) \\|" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": ", as Fig. 10(c) illustrates. After obtaining them, we denote the index of selected subcarriers as " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "K^r" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "K^l" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": ". Next, as Fig. 
10(c) depicts, we align " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "K^r" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "K^l" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": ", then trim the head and tail to retain the intersection of two clusters, " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "\\| \\Delta H^r (f_k) \\|" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "\\| \\Delta H^l (f_k) \\|" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": ". Finally, we multiply " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "\\| \\Delta H^r (f_k) \\|" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "\\| \\Delta H^l (f_k) \\|" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": " to form a weight matrix " + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 50, + 353, + 295, + 446 + ], + "type": "text", + "content": ", which is illustrated in Fig. 10(d)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 58, + 449, + 294, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 449, + 294, + 506 + ], + "spans": [ + { + "bbox": [ + 58, + 449, + 294, + 506 + ], + "type": "interline_equation", + "content": "G = \\begin{bmatrix} \\| \\Delta H ^ {r} (f _ {K ^ {r} - \\delta}) \\| \\\\ \\vdots \\\\ \\| \\Delta H ^ {r} (f _ {K ^ {r} + \\delta}) \\| \\end{bmatrix} \\times \\begin{bmatrix} \\| \\Delta H ^ {l} (f _ {K ^ {l} - \\delta}) \\| & \\cdots & \\| \\Delta H ^ {l} (f _ {K ^ {l} + \\delta}) \\| \\end{bmatrix} \\tag {8}", + "image_path": "b91b6ec0b5eab711dad3ae20feabbddc67cfc373e1242347c7a94cc1e9363cd3.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "spans": [ + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "text", + "content": " bounds the retained frequency range around " + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "inline_equation", + "content": "K^r" + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "inline_equation", + "content": "K^l" + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "text", + "content": ". The frequencies " + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "inline_equation", + "content": "f_r" + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "inline_equation", + "content": "f_l" + }, + { + "bbox": [ + 50, + 512, + 295, + 526 + ], + "type": "text", + "content": " are then estimated by taking the weighted average" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 529, + 294, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 529, + 294, + 621 + ], + "spans": [ + { + "bbox": [ + 50, + 529, + 294, + 621 + ], + "type": "text", + "content": "of values in " + }, + { + "bbox": [ + 50, + 529, + 294, + 621 + ], + "type": "inline_equation", + "content": "[f_{K^{r}-\\delta}, f_{K^{r}+\\delta}]" + }, + { + "bbox": [ + 50, + 529, + 294, + 621 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 529, + 294, + 621 + ], + "type": "inline_equation", + "content": "[f_{K^{l}-\\delta}, f_{K^{l}+\\delta}]" + }, + { + "bbox": [ + 50, + 529, + 294, + 621 + ], + "type": "text", + "content": ", which are weighted by the corresponding values in the matrix " + }, + { + "bbox": [ + 50, + 529, + 294,
+ 621 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 50, + 529, + 294, + 621 + ], + "type": "text", + "content": ". The purpose of this step is still to mitigate the interference of the multipath signal. After that, the estimated values of the two frequencies will be fed into Eq. (4) to output an estimation of the target's location. Note that if there are multiple WiFi links for selection, one can choose the link that results in the smallest size of " + }, + { + "bbox": [ + 50, + 529, + 294, + 621 + ], + "type": "inline_equation", + "content": "\\| \\Delta H(f_k)\\|" + }, + { + "bbox": [ + 50, + 529, + 294, + 621 + ], + "type": "text", + "content": ", meaning that the range of LoS signals' frequency is reduced to the minimum." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 621, + 295, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 621, + 295, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 621, + 295, + 708 + ], + "type": "text", + "content": "Note that the basis of our localization algorithm is using the different CP signals to distinguish different LWAs, and the CP signals can't be replaced by the LP signals. The reason is that the LP signals may lead to high localization errors or even the breakdown of the localization system. Specifically, once the orientation of LP devices changes, polarization directions of these devices change accordingly. As such, each receiving antenna is very likely to receive FSDM signals from both LWAs and can't distinguish them." + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 318, + 80, + 558, + 182 + ], + "blocks": [ + { + "bbox": [ + 318, + 80, + 558, + 182 + ], + "lines": [ + { + "bbox": [ + 318, + 80, + 558, + 182 + ], + "spans": [ + { + "bbox": [ + 318, + 80, + 558, + 182 + ], + "type": "image", + "image_path": "e14dd07adfafe5f6159b6067571cb4b5d052bc770da68484928b81b7d7546a66.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 382, + 186, + 492, + 197 + ], + "lines": [ + { + "bbox": [ + 382, + 186, + 492, + 197 + ], + "spans": [ + { + "bbox": [ + 382, + 186, + 492, + 197 + ], + "type": "text", + "content": "Figure 11: Hardware Settings." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "spans": [ + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "type": "text", + "content": "For example, a receiving antenna with " + }, + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "type": "inline_equation", + "content": "0^{\\circ}" + }, + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "type": "text", + "content": " polarization can receive both " + }, + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "type": "inline_equation", + "content": "0^{\\circ}" + }, + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "type": "text", + "content": " polarized FSDM signals after rotating " + }, + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "type": "inline_equation", + "content": "45^{\\circ}" + }, + { + "bbox": [ + 313, + 201, + 559, + 293 + ], + "type": "text", + "content": ". 
In this case, the target can't distinguish FSDM signals from the two LWAs, and then the localization system can't work. Note that this problem can't be avoided since the target antenna's orientation isn't known in advance. In contrast, CP signals are free from this problem. The RHCP signal can't be received by LHCP antennas no matter which orientation the target antenna has." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 293, + 560, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 293, + 560, + 327 + ], + "spans": [ + { + "bbox": [ + 314, + 293, + 560, + 327 + ], + "type": "text", + "content": "Next, we will proceed with describing the prototype implementation to gain insights on the performance of Bifrost in varied settings." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 328, + 411, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 328, + 411, + 340 + ], + "spans": [ + { + "bbox": [ + 315, + 328, + 411, + 340 + ], + "type": "text", + "content": "4 EVALUATION" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 352, + 560, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 352, + 560, + 369 + ], + "spans": [ + { + "bbox": [ + 313, + 352, + 560, + 369 + ], + "type": "text", + "content": "We evaluate the performance of Bifrost using two low-cost PCB-based LWAs working at 5.17GHz-5.33GHz and a WiFi sensing plat" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 373, + 559, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 373, + 559, + 548 + ], + "spans": [ + { + "bbox": [ + 313, + 373, + 559, + 548 + ], + "type": "text", + "content": "form called PicoScenes [38] to extract CSI. When Bifrost functions, the WiFi transceiver communicates at the same band based on 802.11ax standard [1]. We first describe our implementation and evaluation settings in §4.1. Then, investigation on Bifrost's performance is four-pronged: §4.2 compares Bifrost with SpotFi [43], the state-of-the-art indoor WiFi localization technique, in a real-world indoor setting and NLoS scenarios, and then shows how the localization accuracy can be improved when Bifrost aids SpotFi to function in AP-constrained scenarios; Subsequently, in §4.3, we conduct an ablation study to evaluate the contribution of each sub-module of localization algorithm; Then, in §4.4, we dissect the impacting factors on localization accuracy, including multipath, transmission power, as well as the distance between LWAs and the AP; Also, we evaluate the influence of deploying Bifrost on data communication of WiFi transceivers in §4.5; Finally we summarize the evaluation in §4.6." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 557, + 515, + 584 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 557, + 515, + 584 + ], + "spans": [ + { + "bbox": [ + 314, + 557, + 515, + 584 + ], + "type": "text", + "content": "4.1 Implementation and Experimental Methodology" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 588, + 560, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 588, + 560, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 588, + 560, + 708 + ], + "type": "text", + "content": "Hardware and Software. Our proposed LWA is shown in Fig. 11(b). 
The main body of our LWA is " + }, + { + "bbox": [ + 313, + 588, + 560, + 708 + ], + "type": "inline_equation", + "content": "24.2\\mathrm{cm} \\times 5.2\\mathrm{cm}" + }, + { + "bbox": [ + 313, + 588, + 560, + 708 + ], + "type": "text", + "content": ", containing 11 single units designed to ensure most input signals' energy can be leaked out. One of the LWA's feed ports is connected to a LP antenna for receiving the WiFi signal while the other port is connected to a " + }, + { + "bbox": [ + 313, + 588, + 560, + 708 + ], + "type": "inline_equation", + "content": "50\\Omega" + }, + { + "bbox": [ + 313, + 588, + 560, + 708 + ], + "type": "text", + "content": " matched load to absorb the remaining energy of the signal that goes through the entire LWA structure. By switching the feed port, the polarization of the FSDM signal can be altered between LHCP and RHCP. Besides, a low-noise amplifier powered by a small rechargeable battery is utilized to boost the input signal with 0.43W power consumption. A NE555 timer IC with a load switch circuit" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "type": "text", + "content": "Bifrost" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "text", + "content": "382" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 82, + 282, + 182 + ], + "blocks": [ + { + "bbox": [ + 65, + 82, + 282, + 182 + ], + "lines": [ + { + "bbox": [ + 65, + 82, + 282, + 182 + ], + "spans": [ + { + "bbox": [ + 65, + 82, + 282, + 182 + ], + "type": "image", + "image_path": "ac1f54c4acd4c26f1fbb5b2b4901becd11f79892dd7ce7e313931dd158e5060a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 165, + 184, + 176, + 193 + ], + "lines": [ + { + "bbox": [ + 165, + 184, + 176, + 193 + ], + "spans": [ + { + "bbox": [ + 165, + 184, + 176, + 193 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 65, + 200, + 282, + 303 + ], + "blocks": [ + { + "bbox": [ + 65, + 200, + 282, + 303 + ], + "lines": [ + { + "bbox": [ + 65, + 200, + 282, + 303 + ], + "spans": [ + { + "bbox": [ + 65, + 200, + 282, + 303 + ], + "type": "image", + "image_path": "eefd18541489eb0898ad31fa879c5762152ff2479e5c18822d2667f50c066d6b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 164, + 304, + 176, + 312 + ], + "lines": [ + { + "bbox": [ + 164, + 304, + 176, + 312 + ], + "spans": [ + { + "bbox": [ + 164, + 304, + 176, + 312 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + 
"bbox": [ + 65, + 320, + 171, + 457 + ], + "blocks": [ + { + "bbox": [ + 65, + 320, + 171, + 457 + ], + "lines": [ + { + "bbox": [ + 65, + 320, + 171, + 457 + ], + "spans": [ + { + "bbox": [ + 65, + 320, + 171, + 457 + ], + "type": "image", + "image_path": "79105e03737063bf0d3574cf9f3347ffdb081d0f7005a9b3f158116807912a80.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 120, + 461, + 130, + 470 + ], + "lines": [ + { + "bbox": [ + 120, + 461, + 130, + 470 + ], + "spans": [ + { + "bbox": [ + 120, + 461, + 130, + 470 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 50, + 483, + 295, + 523 + ], + "lines": [ + { + "bbox": [ + 50, + 483, + 295, + 523 + ], + "spans": [ + { + "bbox": [ + 50, + 483, + 295, + 523 + ], + "type": "text", + "content": "Figure 12: Experimental scenarios and deployment: (a) The hall scenario; (b) The classroom scenario; (c) The APs' deployment in the corridor and the classroom; (d) The APs' deployment in the hall and the meeting room." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 194, + 320, + 284, + 453 + ], + "blocks": [ + { + "bbox": [ + 194, + 320, + 284, + 453 + ], + "lines": [ + { + "bbox": [ + 194, + 320, + 284, + 453 + ], + "spans": [ + { + "bbox": [ + 194, + 320, + 284, + 453 + ], + "type": "image", + "image_path": "f1b54cdfe48e3f891784027e9e717ec71afd8653ef63103aea8b00ad7e668023.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 234, + 460, + 246, + 470 + ], + "lines": [ + { + "bbox": [ + 234, + 460, + 246, + 470 + ], + "spans": [ + { + "bbox": [ + 234, + 460, + 246, + 470 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 543, + 295, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 543, + 295, + 630 + ], + "spans": [ + { + "bbox": [ + 50, + 543, + 295, + 630 + ], + "type": "text", + "content": "is employed to control the on-off state of the amplifier and further LWAs, resulting in a " + }, + { + "bbox": [ + 50, + 543, + 295, + 630 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 50, + 543, + 295, + 630 + ], + "type": "text", + "content": " duty-cycled manner for energy saving. The cost of each proposed LWA is 7.41 USD, where 4.36 USD is for the material cost and 3.05 USD is for the control circuit. To receive the CP FSDM signal, we equip the target with two " + }, + { + "bbox": [ + 50, + 543, + 295, + 630 + ], + "type": "inline_equation", + "content": "3.87\\mathrm{cm}\\times 3.87\\mathrm{cm}" + }, + { + "bbox": [ + 50, + 543, + 295, + 630 + ], + "type": "text", + "content": " patch antennas, as Fig. 11(a) depicts. One antenna is LHCP, while the other is RHCP, and both are fixed on the antenna mount connected to COMFAST AX210 WiFi card [17] on the host computer." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 631, + 295, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 631, + 295, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 631, + 295, + 675 + ], + "type": "text", + "content": "We use PicoScenes, a WiFi sensing platform, to send WiFi packets at AP with 20dBm, and extract CSI at the target. 
In the working band of Bifrost, PicoScenes can procure CSI data of 2025 subcarriers with indexes " + }, + { + "bbox": [ + 50, + 631, + 295, + 675 + ], + "type": "inline_equation", + "content": "[-1012, 1012]" + }, + { + "bbox": [ + 50, + 631, + 295, + 675 + ], + "type": "text", + "content": ". We run PicoScenes on Ubuntu 20.04," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 84, + 559, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 84, + 559, + 105 + ], + "spans": [ + { + "bbox": [ + 314, + 84, + 559, + 105 + ], + "type": "text", + "content": "then analyze CSI data and execute the localization algorithm on MATLAB 2022b." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 109, + 560, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 109, + 560, + 197 + ], + "spans": [ + { + "bbox": [ + 314, + 109, + 560, + 197 + ], + "type": "text", + "content": "Baseline. We compare Bifrost with SpotFi, the state-of-the-art indoor WiFi localization technique, under various settings. To ensure the validity of our results, we make our best effort to re-implement SpotFi and ensure a fair comparison. We evaluate the performance of SpotFi by deploying multiple WiFi APs strictly based on the real-world settings of WiFi APs, as Fig. 12 shows. Before each set of experiments, we use a laser rangefinder to obtain the ground-truth, including coordinates of the target device and LWAs." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 199, + 561, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 199, + 561, + 299 + ], + "spans": [ + { + "bbox": [ + 313, + 199, + 561, + 299 + ], + "type": "text", + "content": "Scenarios and Deployment. We select four typical indoor scenarios for evaluation, across different sizes and different levels of multipath effect: 1) A small-size hall (6.2m×4.5m) with little multipath; 2) A long and narrow corridor (7.5m×2.1m) with little multipath; 3) A small-size meeting room (5.7m×4.9m) with rich multipath; 4) A large-size classroom (10.6m×7.1m) with rich multipath. In each scenario, two LWAs are attached to two orthogonal walls. The target device is mounted onto tripods, keeping the height constant across all experiments." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 307, + 447, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 307, + 447, + 319 + ], + "spans": [ + { + "bbox": [ + 315, + 307, + 447, + 319 + ], + "type": "text", + "content": "4.2 Overall Performance" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 322, + 559, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 322, + 559, + 389 + ], + "spans": [ + { + "bbox": [ + 314, + 322, + 559, + 389 + ], + "type": "text", + "content": "In this section, we first evaluate the localization accuracy of Bifrost and SpotFi in real-world settings, where WiFi APs in experiments are deployed at the same positions as those in practice. Then we deploy Bifrost in the meeting room and classroom, where SpotFi doesn't work well, to enhance the performance of SpotFi, so as to see the accuracy improvement brought by Bifrost." 
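For readers who want to map the reported subcarrier indexes to absolute frequencies, a small sketch follows. The 78.125 kHz tone spacing (standard 802.11ax numerology) and the 5.25 GHz band center are our assumptions for illustration, and none of the names below belong to the PicoScenes API:

```python
# Sketch: frequency axis of the 2025 subcarriers reported in the working band,
# assuming 802.11ax tone spacing (78.125 kHz) centered at 5.25 GHz
# (the middle of the 5.17-5.33 GHz band). Illustrative only.
import numpy as np

TONE_SPACING_HZ = 78.125e3
F_CENTER_HZ = 5.25e9

idx = np.arange(-1012, 1013)                   # indexes [-1012, 1012]
freq_hz = F_CENTER_HZ + idx * TONE_SPACING_HZ  # per-subcarrier frequencies
assert idx.size == 2025                        # matches the count quoted above
```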
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 391, + 559, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 391, + 559, + 511 + ], + "spans": [ + { + "bbox": [ + 313, + 391, + 559, + 511 + ], + "type": "text", + "content": "Performance Comparison in Realistic Settings. In reality, most indoor WiFi APs are dispersively deployed at different locations and very likely separated from each other by walls so that LoS paths are usually obstructed. Thus, it is hard for the target device to establish more than one LoS connection with APs, according to our real-world investigation (i.e., Fig. 2). We evaluate the performance of SpotFi in these practical indoor settings, and also the localization error of Bifrost when deployed in the above-mentioned four scenarios. 50 locations are chosen in each scenario for location estimation. The evaluation results are reported in Fig. 13 (The solid blue line stands for Bifrost and the dashed red line stands for SpotFi)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "spans": [ + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "text", + "content": "In the hall, both Bifrost and SpotFi are supposed to exhibit the best performance due to the low-level multipath effect, but the median error of SpotFi is " + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "inline_equation", + "content": "1.23\mathrm{m}" + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "text", + "content": ", which is more than " + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "inline_equation", + "content": "2\times" + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "text", + "content": " Bifrost's " + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "inline_equation", + "content": "0.61\mathrm{m}" + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "text", + "content": ". This is because only one decent LoS signal can be obtained at most locations due to the blockage of walls even though three APs are deployed around. As the pie chart illustrates, SpotFi outperforms Bifrost at only 9 locations. When it comes to the corridor scenario, the median error of SpotFi increases to " + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "inline_equation", + "content": "1.77\mathrm{m}" + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "text", + "content": " because two of the three APs are situated inside rooms so that AoAs obtained by the target are heavily distorted. We note that the median error of Bifrost also increases to " + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "inline_equation", + "content": "0.76\mathrm{m}" + }, + { + "bbox": [ + 313, + 512, + 560, + 653 + ], + "type": "text", + "content": ". This slight performance degradation is mainly due to the extension of the localization range, which is further investigated in §4.4." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 654, + 560, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 654, + 560, + 708 + ], + "spans": [ + { + "bbox": [ + 314, + 654, + 560, + 708 + ], + "type": "text", + "content": "Next, we switch to the meeting room where more pronounced multipath exists. What's worse, there is no AP in the meeting room, making it more challenging for both approaches to function. 
The accuracy of the two approaches is unsurprisingly degraded, where the median error is " + }, + { + "bbox": [ + 314, + 654, + 560, + 708 + ], + "type": "inline_equation", + "content": "1.95\\mathrm{m}" + }, + { + "bbox": [ + 314, + 654, + 560, + 708 + ], + "type": "text", + "content": " in SpotFi and " + }, + { + "bbox": [ + 314, + 654, + 560, + 708 + ], + "type": "inline_equation", + "content": "0.91\\mathrm{m}" + }, + { + "bbox": [ + 314, + 654, + 560, + 708 + ], + "type": "text", + "content": " in Bifrost. Similarly, the" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 57, + 247, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 57, + 247, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 247, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "spans": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "text", + "content": "Yimiao Sun, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 689, + 295, + 707 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 689, + 295, + 707 + ], + "spans": [ + { + "bbox": [ + 50, + 689, + 295, + 707 + ], + "type": "text", + "content": "PicoScenes automatically interpolates the 0-th and other 32 pilot subcarriers besides 1992 tone RUs in this band." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "text", + "content": "383" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 82, + 175, + 199 + ], + "blocks": [ + { + "bbox": [ + 52, + 82, + 175, + 199 + ], + "lines": [ + { + "bbox": [ + 52, + 82, + 175, + 199 + ], + "spans": [ + { + "bbox": [ + 52, + 82, + 175, + 199 + ], + "type": "image", + "image_path": "20c13b4e9eca0eb9f70a5bb01dab7d7b75d52113724c5af0424c18db95b25e49.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 203, + 126, + 211 + ], + "lines": [ + { + "bbox": [ + 100, + 203, + 126, + 211 + ], + "spans": [ + { + "bbox": [ + 100, + 203, + 126, + 211 + ], + "type": "text", + "content": "(a) Hall" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 181, + 82, + 302, + 199 + ], + "blocks": [ + { + "bbox": [ + 181, + 82, + 302, + 199 + ], + "lines": [ + { + "bbox": [ + 181, + 82, + 302, + 199 + ], + "spans": [ + { + "bbox": [ + 181, + 82, + 302, + 199 + ], + "type": "image", + "image_path": "75fa902b814efe455e4e687ee56b920319ee15e0c247276fed013941dd966a6b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 221, + 203, + 261, + 212 + ], + "lines": [ + { + "bbox": [ + 221, + 203, + 261, + 212 + ], + "spans": [ + { + "bbox": [ + 221, + 203, + 261, + 212 + ], + "type": "text", + "content": "(b) Corridor" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 308, + 82, + 430, + 199 + ], + "blocks": [ + { + "bbox": [ + 308, + 82, + 430, + 199 + ], + 
"lines": [ + { + "bbox": [ + 308, + 82, + 430, + 199 + ], + "spans": [ + { + "bbox": [ + 308, + 82, + 430, + 199 + ], + "type": "image", + "image_path": "13b58135f6e1b371566e5ed5acca1a3ad836d23cf639ccd43f0c1935dc9868e1.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 203, + 397, + 213 + ], + "lines": [ + { + "bbox": [ + 342, + 203, + 397, + 213 + ], + "spans": [ + { + "bbox": [ + 342, + 203, + 397, + 213 + ], + "type": "text", + "content": "(c) Meeting room" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 436, + 82, + 557, + 199 + ], + "blocks": [ + { + "bbox": [ + 436, + 82, + 557, + 199 + ], + "lines": [ + { + "bbox": [ + 436, + 82, + 557, + 199 + ], + "spans": [ + { + "bbox": [ + 436, + 82, + 557, + 199 + ], + "type": "image", + "image_path": "921fa998df71dbda1001c882766862254b51e8730c8684b124dbf457256bc250.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 474, + 203, + 519, + 212 + ], + "lines": [ + { + "bbox": [ + 474, + 203, + 519, + 212 + ], + "spans": [ + { + "bbox": [ + 474, + 203, + 519, + 212 + ], + "type": "text", + "content": "(d) Classroom" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 62, + 246, + 178, + 368 + ], + "blocks": [ + { + "bbox": [ + 62, + 246, + 178, + 368 + ], + "lines": [ + { + "bbox": [ + 62, + 246, + 178, + 368 + ], + "spans": [ + { + "bbox": [ + 62, + 246, + 178, + 368 + ], + "type": "image", + "image_path": "60f56b36363b5e62b664b4fda9fd7d15b212e2b11c0e085fa5105f820f27e554.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 82, + 374, + 178, + 384 + ], + "lines": [ + { + "bbox": [ + 82, + 374, + 178, + 384 + ], + "spans": [ + { + "bbox": [ + 82, + 374, + 178, + 384 + ], + "type": "text", + "content": "(a) The NLoS AP outdoors." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 92, + 392, + 256, + 402 + ], + "lines": [ + { + "bbox": [ + 92, + 392, + 256, + 402 + ], + "spans": [ + { + "bbox": [ + 92, + 392, + 256, + 402 + ], + "type": "text", + "content": "Figure 14: Deployment of the NLoS settings." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 186, + 245, + 275, + 368 + ], + "blocks": [ + { + "bbox": [ + 50, + 219, + 557, + 239 + ], + "lines": [ + { + "bbox": [ + 50, + 219, + 557, + 239 + ], + "spans": [ + { + "bbox": [ + 50, + 219, + 557, + 239 + ], + "type": "text", + "content": "Figure 13: Overall performance of Bifrost and SpotFi across different scenarios (The pie charts represent how many locations where each method shows a lower error)." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 186, + 245, + 275, + 368 + ], + "lines": [ + { + "bbox": [ + 186, + 245, + 275, + 368 + ], + "spans": [ + { + "bbox": [ + 186, + 245, + 275, + 368 + ], + "type": "image", + "image_path": "6bb200cf265600ee644a5c41e6e2c4fcc79f9f4fe3811f9f401168875b39ae3c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 192, + 374, + 284, + 384 + ], + "lines": [ + { + "bbox": [ + 192, + 374, + 284, + 384 + ], + "spans": [ + { + "bbox": [ + 192, + 374, + 284, + 384 + ], + "type": "text", + "content": "(b) The NLoS AP indoors." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 421, + 295, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 421, + 295, + 509 + ], + "spans": [ + { + "bbox": [ + 50, + 421, + 295, + 509 + ], + "type": "text", + "content": "performance of SpotFi is restrained due to the lack of the LoS signal. Bifrost exhibits acceptable performance in this tough environment and avoids escalation of errors. This can be attributed to two aspects. On one hand, Bifrost can function once the input signal has enough energy, without the need for a LoS AP. On the other hand, Bifrost exploits a delicate algorithm to tame the multipath effect. We will further discuss issues of multipath and NLoS in §4.4. In this scenario, SpotFi doesn't outperform Bifrost at any point." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 50, + 510, + 295, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 510, + 295, + 574 + ], + "spans": [ + { + "bbox": [ + 50, + 510, + 295, + 574 + ], + "type": "text", + "content": "Finally, we set SpotFi and Bifrost in the large-size classroom with rich multipath. With a LoS AP, the median error of SpotFi is reduced to " + }, + { + "bbox": [ + 50, + 510, + 295, + 574 + ], + "type": "inline_equation", + "content": "1.87\mathrm{m}" + }, + { + "bbox": [ + 50, + 510, + 295, + 574 + ], + "type": "text", + "content": ", which is better than that in the meeting room with no LoS AP. By contrast, the median error of Bifrost increases to " + }, + { + "bbox": [ + 50, + 510, + 295, + 574 + ], + "type": "inline_equation", + "content": "1.20\mathrm{m}" + }, + { + "bbox": [ + 50, + 510, + 295, + 574 + ], + "type": "text", + "content": ", mainly due to a longer distance between LWAs and WiFi APs and more multipath." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "type": "text", + "content": "Through all experiments in four scenarios, the median error of Bifrost is " + }, + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "type": "inline_equation", + "content": "0.81\mathrm{m}" + }, + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "type": "text", + "content": ", which is " + }, + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "type": "inline_equation", + "content": "52.35\%" + }, + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "type": "text", + "content": " less than that of SpotFi (i.e., " + }, + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "type": "inline_equation", + "content": "1.70\mathrm{m}" + }, + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "type": "text", + "content": "). Bifrost outperforms SpotFi at most locations, except those where the target can obtain 3 LoS signals from 3 APs. However, as shown in Fig. 13, the chance for SpotFi to achieve better performance is less than " + }, + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "type": "inline_equation", + "content": "7\%" + }, + { + "bbox": [ + 50, + 574, + 295, + 640 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 50, + 643, + 295, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 643, + 295, + 686 + ], + "spans": [ + { + "bbox": [ + 50, + 643, + 295, + 686 + ], + "type": "text", + "content": "Performance Comparison in NLoS Scenarios. 
Then we conduct two groups of experiments to demonstrate Bifrost's ability to localize in NLoS scenarios and compare its performance with that of SpotFi." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 50, + 686, + 295, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 686, + 295, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 686, + 295, + 708 + ], + "type": "text", + "content": "In the first group of experiments, we deploy the localized target and the LWAs in a hall. As Bifrost only uses one AP to function," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "spans": [ + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "text", + "content": "we evaluate the performance of Bifrost when this AP is inside and outside the hall (i.e., LoS and NLoS scenarios). The results in Fig. 15 show that the median errors of Bifrost are " + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "inline_equation", + "content": "0.61\mathrm{m}" + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "text", + "content": " in LoS and " + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "inline_equation", + "content": "0.73\mathrm{m}" + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "text", + "content": " in NLoS, respectively. Meanwhile, in the same hall, we also evaluate the performance of SpotFi in LoS and NLoS scenarios, respectively. In the LoS scenario, 3 APs are deployed in the hall and can establish LoS connections with the target. In the NLoS scenario, as Fig. 14(a) shows, one of the APs (i.e., AP1) is outside the room, while the other 2 APs (i.e., AP2 and AP3) can connect with the target along the LoS paths. We find that the median error of SpotFi increases from " + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "inline_equation", + "content": "0.45\mathrm{m}" + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "text", + "content": " in LoS to " + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "inline_equation", + "content": "1.15\mathrm{m}" + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "text", + "content": " in NLoS. The error may further go beyond " + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "inline_equation", + "content": "1.6\mathrm{m}" + }, + { + "bbox": [ + 313, + 251, + 560, + 393 + ], + "type": "text", + "content": " if only one AP is left in LoS, as reported in [43]." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 394, + 560, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 394, + 560, + 514 + ], + "spans": [ + { + "bbox": [ + 313, + 394, + 560, + 514 + ], + "type": "text", + "content": "In the second group of experiments, we compare the performance of Bifrost and SpotFi using a different NLoS setting. As Fig. 14(b) shows, we deploy the localized target, LWAs and three APs in the same hall. One of the three WiFi APs (i.e., AP1) is deliberately deployed around the corner and surrounded by multiple chairs, so it can't establish LoS connections with either the target or the LWAs, while the other 2 APs (i.e., AP2 and AP3) can connect with the target along the LoS paths. 
SpotFi uses all 3 APs to localize the target, and its median error is " + }, + { + "bbox": [ + 313, + 394, + 560, + 514 + ], + "type": "inline_equation", + "content": "1.21\\mathrm{m}" + }, + { + "bbox": [ + 313, + 394, + 560, + 514 + ], + "type": "text", + "content": ". Bifrost only uses the AP in NLoS (i.e., AP1) to function, and its median error is " + }, + { + "bbox": [ + 313, + 394, + 560, + 514 + ], + "type": "inline_equation", + "content": "0.69\\mathrm{m}" + }, + { + "bbox": [ + 313, + 394, + 560, + 514 + ], + "type": "text", + "content": ", which is " + }, + { + "bbox": [ + 313, + 394, + 560, + 514 + ], + "type": "inline_equation", + "content": "42.98\\%" + }, + { + "bbox": [ + 313, + 394, + 560, + 514 + ], + "type": "text", + "content": " less than that of SpotFi." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 514, + 559, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 559, + 559 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 559, + 559 + ], + "type": "text", + "content": "These two groups of experiments demonstrate that Bifrost provides relatively stable performance when the WiFi AP is in LoS and NLoS scenarios. In NLoS scenarios, Bifrost can achieve much more accurate performance than SpotFi." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 561, + 560, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 561, + 560, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 561, + 560, + 658 + ], + "type": "text", + "content": "Performance Enhancement when Bifrost Aids SpotFi. Next, we deploy Bifrost where SpotFi shows poor accuracy to see if Bifrost can aid SpotFi to improve localization accuracy. Actually, it is impossible to deploy Bifrost everywhere, so we choose the meeting room and classroom where localization accuracy is heavily affected by constrained APs and reports the worst results. Specifically, when the target gets into these two scenarios, its location will be reported by Bifrost. Otherwise, the target keeps using SpotFi for indoor localization." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 659, + 558, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 659, + 558, + 693 + ], + "spans": [ + { + "bbox": [ + 313, + 659, + 558, + 693 + ], + "type": "text", + "content": "As shown in Fig. 16, the median localization error is " + }, + { + "bbox": [ + 313, + 659, + 558, + 693 + ], + "type": "inline_equation", + "content": "1.13\\mathrm{m}" + }, + { + "bbox": [ + 313, + 659, + 558, + 693 + ], + "type": "text", + "content": " when Bifrost aids SpotFi, achieving " + }, + { + "bbox": [ + 313, + 659, + 558, + 693 + ], + "type": "inline_equation", + "content": "33.54\\%" + }, + { + "bbox": [ + 313, + 659, + 558, + 693 + ], + "type": "text", + "content": " error reduction compared with SpotFi operating independently in all scenarios. 
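The headline percentages can be sanity-checked from the reported medians; a quick check (our arithmetic, not the authors' code, assuming the 1.70 m overall SpotFi median is the baseline for both figures):

```python
# Relative error reductions implied by the reported median errors (meters).
spotfi, bifrost, aided = 1.70, 0.81, 1.13
print(f"{(spotfi - bifrost) / spotfi:.2%}")  # 52.35%, as reported
print(f"{(spotfi - aided) / spotfi:.2%}")    # 33.53% (reported as 33.54%,
                                             # presumably from unrounded medians)
```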
This indicates" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "type": "text", + "content": "Bifrost" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "text", + "content": "384" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 82, + 179, + 199 + ], + "blocks": [ + { + "bbox": [ + 52, + 82, + 179, + 199 + ], + "lines": [ + { + "bbox": [ + 52, + 82, + 179, + 199 + ], + "spans": [ + { + "bbox": [ + 52, + 82, + 179, + 199 + ], + "type": "image", + "image_path": "352097b5d2f11c8cdaf6dd1ceacd68ed9b900106b469d063d16f21faf358a35f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 208, + 423, + 231 + ], + "lines": [ + { + "bbox": [ + 50, + 208, + 423, + 231 + ], + "spans": [ + { + "bbox": [ + 50, + 208, + 423, + 231 + ], + "type": "text", + "content": "Figure 15: Performance of Bifrost and SpotFi in the NLoS scenario. Figure 16: Performance enhancement brought by Bifrost. Figure 17: Ablation study on the localization algorithm." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 181, + 82, + 302, + 198 + ], + "blocks": [ + { + "bbox": [ + 181, + 82, + 302, + 198 + ], + "lines": [ + { + "bbox": [ + 181, + 82, + 302, + 198 + ], + "spans": [ + { + "bbox": [ + 181, + 82, + 302, + 198 + ], + "type": "image", + "image_path": "8440172285ba16eaeb2c520a87c7567b6450151cb2e9e25468a2c653a068c295.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 304, + 81, + 422, + 198 + ], + "blocks": [ + { + "bbox": [ + 304, + 81, + 422, + 198 + ], + "lines": [ + { + "bbox": [ + 304, + 81, + 422, + 198 + ], + "spans": [ + { + "bbox": [ + 304, + 81, + 422, + 198 + ], + "type": "image", + "image_path": "67c7cfcad83ea3c2c70ebae1a6e28f6ac8a004921890e9bd31e0f8baa85750ab.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 432, + 208, + 559, + 229 + ], + "lines": [ + { + "bbox": [ + 432, + 208, + 559, + 229 + ], + "spans": [ + { + "bbox": [ + 432, + 208, + 559, + 229 + ], + "type": "text", + "content": "Figure 18: Impact of the multipath effect." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 433, + 82, + 559, + 198 + ], + "blocks": [ + { + "bbox": [ + 433, + 82, + 559, + 198 + ], + "lines": [ + { + "bbox": [ + 433, + 82, + 559, + 198 + ], + "spans": [ + { + "bbox": [ + 433, + 82, + 559, + 198 + ], + "type": "image", + "image_path": "46dc08a5fc2398f6e3fd535ac2a3b8f880e1ed26ef679898911b6677925990eb.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 247, + 294, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 247, + 294, + 269 + ], + "spans": [ + { + "bbox": [ + 50, + 247, + 294, + 269 + ], + "type": "text", + "content": "that Bifrost can not only work independently, but also enhance localization accuracy of existing localization techniques." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 284, + 153, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 284, + 153, + 297 + ], + "spans": [ + { + "bbox": [ + 50, + 284, + 153, + 297 + ], + "type": "text", + "content": "4.3 Ablation Study" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 299, + 296, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 299, + 296, + 396 + ], + "spans": [ + { + "bbox": [ + 50, + 299, + 296, + 396 + ], + "type": "text", + "content": "There are three crucial sub-modules in Bifrost's localization algorithm, that is, identifying the frequencies of CP signals (module 1 presented in §3.3), filtering out the multipath signal (module 2 presented in §3.4) and purifying the LoS signal for localization (module 3 presented in §3.5). We conduct an ablation study to evaluate the contribution of each sub-module to localization accuracy. The evaluation is conducted under four settings, S1: without any sub-module, S2: only with module 1, S3: with modules 1 and 2, and S4: with all three modules." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "spans": [ + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "type": "text", + "content": "Fig. 17 reports the results of this ablation study. If we do nothing and directly extract frequencies from raw amplitude data of CSI, the median localization error will surge to " + }, + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "type": "inline_equation", + "content": "3.31\\mathrm{m}" + }, + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "type": "text", + "content": " (S1). Instead, once the LP WiFi signal is filtered out, the frequencies of CP signals can be highlighted, which results in the median localization error of " + }, + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "type": "inline_equation", + "content": "1.51\\mathrm{m}" + }, + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "type": "text", + "content": " (S2). 
Further, the results of S3 and S4 show that the median error will be reduced to around " + }, + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "type": "inline_equation", + "content": "0.93\mathrm{m}" + }, + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "type": "inline_equation", + "content": "0.81\mathrm{m}" + }, + { + "bbox": [ + 50, + 397, + 296, + 496 + ], + "type": "text", + "content": " if we filter out the multipath signal and purify the LoS signal. These results show the necessity and contribution of each module in our design." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 511, + 168, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 511, + 168, + 525 + ], + "spans": [ + { + "bbox": [ + 50, + 511, + 168, + 525 + ], + "type": "text", + "content": "4.4 Impacting Factors" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 526, + 296, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 526, + 296, + 569 + ], + "spans": [ + { + "bbox": [ + 50, + 526, + 296, + 569 + ], + "type": "text", + "content": "Next, we analyze the impact of three different factors on the performance of Bifrost, that is, multipath in the environment, the transmission power, as well as the distance between LWAs and WiFi AP." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 574, + 296, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 574, + 296, + 705 + ], + "spans": [ + { + "bbox": [ + 50, + 574, + 296, + 705 + ], + "type": "text", + "content": "Multipath. We examine the AoA estimation accuracy of Bifrost in multipath scenarios. We fix the positions of LWAs and the target, then change the number of indoor objects (i.e., chairs and desks) to create different degrees of multipath. Specifically, two desks are first set in the room to emulate a light multipath environment, and then ten chairs are further added to produce richer signal reflections. The results in Fig. 18 indicate that the AoA estimation accuracy degrades as the multipath is intensified, where the median angle error initially sits around " + }, + { + "bbox": [ + 50, + 574, + 296, + 705 + ], + "type": "inline_equation", + "content": "3.8^{\circ}" + }, + { + "bbox": [ + 50, + 574, + 296, + 705 + ], + "type": "text", + "content": ", and then increases to around " + }, + { + "bbox": [ + 50, + 574, + 296, + 705 + ], + "type": "inline_equation", + "content": "6.7^{\circ}" + }, + { + "bbox": [ + 50, + 574, + 296, + 705 + ], + "type": "text", + "content": ". The more multipath exists, the more sparsely clustered subcarriers " + }, + { + "bbox": [ + 50, + 574, + 296, + 705 + ], + "type": "inline_equation", + "content": "C_s" + }, + { + "bbox": [ + 50, + 574, + 296, + 705 + ], + "type": "text", + "content": " are formed. Thus, when these clusters are stacked with each other to form a wider cluster, there is a certain chance for our" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 247, + 559, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 247, + 559, + 268 + ], + "spans": [ + { + "bbox": [ + 314, + 247, + 559, + 268 + ], + "type": "text", + "content": "algorithm to misidentify the LoS signal, causing greater errors in AoA estimation." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 269, + 560, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 269, + 560, + 323 + ], + "spans": [ + { + "bbox": [ + 314, + 269, + 560, + 323 + ], + "type": "text", + "content": "We also note that Bifrost maintains relatively stable performance across different polarizations. The difference between median errors of LHCP and RHCP signals is less than " + }, + { + "bbox": [ + 314, + 269, + 560, + 323 + ], + "type": "inline_equation", + "content": "0.3^{\\circ}" + }, + { + "bbox": [ + 314, + 269, + 560, + 323 + ], + "type": "text", + "content": ", which underscores the robustness of our proposed LWA and localization algorithm." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 327, + 560, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 327, + 560, + 491 + ], + "spans": [ + { + "bbox": [ + 313, + 327, + 560, + 491 + ], + "type": "text", + "content": "Transmission Power. The default transmission power of AP is 20dBm in our above-mentioned evaluations, and we now vary this value to investigate its impact on localization performance. Moreover, as mentioned before, we can't always guarantee that the WiFi AP establishes LoS path with LWAs, so we also compare the situation of the AP at LoS and NLoS scenarios in each setting of transmission power. We place AP at 2m distance outside the door and the target 2m inside the door, switching between the LoS and NLoS scenarios by opening and closing the door. Results in Fig. 19 show that decreasing the transmission power leads to an increase in the localization error, regardless of whether the AP is at LoS or NLoS. Besides, the errors in LoS scenario are always lower than that of NLoS for the same transmission power. These findings indicate the negative impact on localization performance that NLoS can have." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 492, + 560, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 492, + 560, + 569 + ], + "spans": [ + { + "bbox": [ + 314, + 492, + 560, + 569 + ], + "type": "text", + "content": "However, we also observe that as the transmission power increases, the impact of NLoS on the performance of Bifrost decreases, albeit gradually. Notably, when the transmission power is set at " + }, + { + "bbox": [ + 314, + 492, + 560, + 569 + ], + "type": "inline_equation", + "content": "20\\mathrm{dBm}" + }, + { + "bbox": [ + 314, + 492, + 560, + 569 + ], + "type": "text", + "content": ", the median errors are " + }, + { + "bbox": [ + 314, + 492, + 560, + 569 + ], + "type": "inline_equation", + "content": "0.61\\mathrm{m}" + }, + { + "bbox": [ + 314, + 492, + 560, + 569 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 492, + 560, + 569 + ], + "type": "inline_equation", + "content": "0.73\\mathrm{m}" + }, + { + "bbox": [ + 314, + 492, + 560, + 569 + ], + "type": "text", + "content": " at LoS and NLoS scenarios, respectively. In practical scenarios, this performance is sufficient to meet the requirements of most location-based applications." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "spans": [ + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "text", + "content": "Distance between AP and LWAs. 
The performance of Bifrost may be influenced by the energy of the input WiFi signal fed into LWAs, because it determines the SNR (signal-to-noise ratio) of the FSDM signal. The energy of the input WiFi signal is mainly related to two factors, namely the transmission power and the distance between the AP and LWAs. While the former factor is previously discussed, we here probe into the impact of distance. We carry out the experiments along the corridor and remove the reflectors as far as possible, while the distance is set to " + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "inline_equation", + "content": "2.5\\mathrm{m}" + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "inline_equation", + "content": "5\\mathrm{m}" + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "inline_equation", + "content": "7.5\\mathrm{m}" + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "inline_equation", + "content": "10\\mathrm{m}" + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "text", + "content": ". Results in Fig. 20 demonstrate that the localization error increases with distance and may even result in outliers. The median errors are " + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "inline_equation", + "content": "0.63\\mathrm{m}" + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "inline_equation", + "content": "0.65\\mathrm{m}" + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "inline_equation", + "content": "0.93\\mathrm{m}" + }, + { + "bbox": [ + 313, + 573, + 560, + 705 + ], + "type": "text", + "content": " in the first three groups of experiments," + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 57, + 247, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 57, + 247, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 247, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "spans": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "text", + "content": "Yimiao Sun, et al." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "text", + "content": "385" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 82, + 183, + 199 + ], + "blocks": [ + { + "bbox": [ + 55, + 82, + 183, + 199 + ], + "lines": [ + { + "bbox": [ + 55, + 82, + 183, + 199 + ], + "spans": [ + { + "bbox": [ + 55, + 82, + 183, + 199 + ], + "type": "image", + "image_path": "16a38231d650bb86b5ff8ee17331187aa7c7d79c58ccc45b049d167c2786c7dd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 208, + 556, + 231 + ], + "lines": [ + { + "bbox": [ + 52, + 208, + 556, + 231 + ], + "spans": [ + { + "bbox": [ + 52, + 208, + 556, + 231 + ], + "type": "text", + "content": "Figure 19: Impact of the transmission power. Figure 20: Impact of the distance between AP and LWAs. Figure 21: Impact on the AP and the target of Bifrost. Figure 22: Impact on other WiFi connections." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 186, + 82, + 304, + 199 + ], + "blocks": [ + { + "bbox": [ + 186, + 82, + 304, + 199 + ], + "lines": [ + { + "bbox": [ + 186, + 82, + 304, + 199 + ], + "spans": [ + { + "bbox": [ + 186, + 82, + 304, + 199 + ], + "type": "image", + "image_path": "2ce18e1841f8f48f59c2e0552990819ca20516f63df55589e295f5240d1087a7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 306, + 82, + 427, + 199 + ], + "blocks": [ + { + "bbox": [ + 306, + 82, + 427, + 199 + ], + "lines": [ + { + "bbox": [ + 306, + 82, + 427, + 199 + ], + "spans": [ + { + "bbox": [ + 306, + 82, + 427, + 199 + ], + "type": "image", + "image_path": "5f2a96e0c1af772f0330b928316bb4518b1496e40fd924527949102072aa6cf5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 429, + 82, + 553, + 198 + ], + "blocks": [ + { + "bbox": [ + 429, + 82, + 553, + 198 + ], + "lines": [ + { + "bbox": [ + 429, + 82, + 553, + 198 + ], + "spans": [ + { + "bbox": [ + 429, + 82, + 553, + 198 + ], + "type": "image", + "image_path": "41240c86f3dc7ef92bd433d9ea054df14aee43845c5886815c7a746fcfd18272.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "spans": [ + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "type": "text", + "content": "all of which are below " + }, + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "type": "inline_equation", + "content": "1\mathrm{m}" + }, + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "type": "text", + "content": ", yet spike to " + }, + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "type": "inline_equation", + "content": "1.49\mathrm{m}" + }, + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "type": "text", + "content": " in the setting of " + }, + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "type": "inline_equation", + "content": "10\mathrm{m}" + }, + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "type": "text", + "content": " distance. 
Despite this, the range of " + }, + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "type": "inline_equation", + "content": "7.5\\mathrm{m}" + }, + { + "bbox": [ + 50, + 247, + 296, + 291 + ], + "type": "text", + "content": " is sufficient to cover most rooms in a typical building, thus ensuring the feasibility of Bifrost's function." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 300, + 212, + 313 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 300, + 212, + 313 + ], + "spans": [ + { + "bbox": [ + 50, + 300, + 212, + 313 + ], + "type": "text", + "content": "4.5 Impact on Communication" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "spans": [ + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "type": "text", + "content": "In this section, we evaluate the impact of deploying Bifrost on the WiFi connections, including the connection between the AP and the target as well as other connections. Firstly, we control the AP to transmit 1000 packets at a " + }, + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "type": "inline_equation", + "content": "50~ms" + }, + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "type": "text", + "content": " interval, and the packet loss rate is recorded in each group of experiments. The results in Fig. 21 show that the median packet loss rates are " + }, + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "type": "inline_equation", + "content": "3.92\\%" + }, + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "type": "inline_equation", + "content": "3.71\\%" + }, + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "type": "text", + "content": " when the LWA is on and off, respectively. This " + }, + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "type": "inline_equation", + "content": "0.2\\%" + }, + { + "bbox": [ + 50, + 315, + 295, + 413 + ], + "type": "text", + "content": " difference implies that the function of Bifrost has a negligible influence on the AP-target communication." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 414, + 296, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 414, + 296, + 556 + ], + "spans": [ + { + "bbox": [ + 50, + 414, + 296, + 556 + ], + "type": "text", + "content": "Secondly, we place Bifrost's transceiver at an intersection region covered by two commercial APs (AP1 in a classroom and AP2 in a laboratory) with good signal quality. We then use different off-the-shelf smartphones to establish WiFi connections with these APs and record the variation in throughput over 2 hours for each connection (C1: OnePlus 9-AP1, C2: iPhone 13-AP2, C3: OnePlus 9-AP1, and C4: iPhone-13-AP2). The results are shown in Fig. 22. We find that the median throughput degrades " + }, + { + "bbox": [ + 50, + 414, + 296, + 556 + ], + "type": "inline_equation", + "content": "2.7\\%" + }, + { + "bbox": [ + 50, + 414, + 296, + 556 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 414, + 296, + 556 + ], + "type": "inline_equation", + "content": "0.4\\%" + }, + { + "bbox": [ + 50, + 414, + 296, + 556 + ], + "type": "text", + "content": " in C1 and C3, which have nearly no impact on the network quality or user experience. Interestingly, the throughput increases when the LWAs are turned on for C2 and C4. 
We attribute this increase to the statistical error that is mainly caused by changes in network quality and wireless channels." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 565, + 197, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 565, + 197, + 578 + ], + "spans": [ + { + "bbox": [ + 51, + 565, + 197, + 578 + ], + "type": "text", + "content": "4.6 Summary of Evaluation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 579, + 295, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 579, + 295, + 601 + ], + "spans": [ + { + "bbox": [ + 50, + 579, + 295, + 601 + ], + "type": "text", + "content": "Based on the above evaluations on Bifrost, the following summary can be drawn:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 604, + 296, + 709 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 604, + 295, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 604, + 295, + 638 + ], + "spans": [ + { + "bbox": [ + 53, + 604, + 295, + 638 + ], + "type": "text", + "content": "1) The median localization error of Bifrost is " + }, + { + "bbox": [ + 53, + 604, + 295, + 638 + ], + "type": "inline_equation", + "content": "0.81\\mathrm{m}" + }, + { + "bbox": [ + 53, + 604, + 295, + 638 + ], + "type": "text", + "content": ", which is " + }, + { + "bbox": [ + 53, + 604, + 295, + 638 + ], + "type": "inline_equation", + "content": "52.35\\%" + }, + { + "bbox": [ + 53, + 604, + 295, + 638 + ], + "type": "text", + "content": " less than that of SpotFi in arguably realistic indoor settings." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 639, + 296, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 639, + 296, + 673 + ], + "spans": [ + { + "bbox": [ + 53, + 639, + 296, + 673 + ], + "type": "text", + "content": "2) Bifrost can be deployed in scenarios without enough APs to help SpotFi enhance performance, reducing the overall localization error of SpotFi by " + }, + { + "bbox": [ + 53, + 639, + 296, + 673 + ], + "type": "inline_equation", + "content": "33.54\\%" + }, + { + "bbox": [ + 53, + 639, + 296, + 673 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 675, + 296, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 675, + 296, + 709 + ], + "spans": [ + { + "bbox": [ + 53, + 675, + 296, + 709 + ], + "type": "text", + "content": "3) Distance between LWAs and APs, multipath and transmission power influence Bifrost's performance differently, yet the absolute accuracy never degrades drastically." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 318, + 247, + 558, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 247, + 558, + 281 + ], + "spans": [ + { + "bbox": [ + 318, + 247, + 558, + 281 + ], + "type": "text", + "content": "4) The deployment of Bifrost has a negligible impact on the communication quality of either the link between the AP and the target or other WiFi connections." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 296, + 407, + 307 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 296, + 407, + 307 + ], + "spans": [ + { + "bbox": [ + 315, + 296, + 407, + 307 + ], + "type": "text", + "content": "5 DISCUSSION" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 311, + 560, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 311, + 560, + 333 + ], + "spans": [ + { + "bbox": [ + 314, + 311, + 560, + 333 + ], + "type": "text", + "content": "In this section, we discuss practical issues concerning the applicability and efficacy of Bifrost." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 335, + 560, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 335, + 560, + 401 + ], + "spans": [ + { + "bbox": [ + 314, + 335, + 560, + 401 + ], + "type": "text", + "content": "Complexity of Deployment. Deploying Bifrost is easy and straightforward, requiring only two steps: sticking LWAs to the wall and measuring the LWAs' coordinates. Compared with most existing indoor localization methods, Bifrost works in a plug-and-play manner, requiring neither complex configurations nor additional operations on APs and the target." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 403, + 560, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 403, + 560, + 470 + ], + "spans": [ + { + "bbox": [ + 314, + 403, + 560, + 470 + ], + "type": "text", + "content": "FoV and Coverage of LWAs. Bifrost achieves " + }, + { + "bbox": [ + 314, + 403, + 560, + 470 + ], + "type": "inline_equation", + "content": "22^{\\circ}" + }, + { + "bbox": [ + 314, + 403, + 560, + 470 + ], + "type": "text", + "content": " FoV in the current prototype by using 160MHz bandwidth (5.17GHz - 5.33GHz). The FoV and coverage can be expanded by using the entire WiFi band, including frequencies at 2.4GHz, 5.2GHz, and 5.8GHz [47]. This expansion is feasible because most existing WiFi devices already support dual- or tri-band functionality." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 472, + 560, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 472, + 560, + 593 + ], + "spans": [ + { + "bbox": [ + 313, + 472, + 560, + 593 + ], + "type": "text", + "content": "Applicability. Considering that most current commercial WiFi devices are equipped with LP antennas, they may not be compatible with Bifrost yet. There are two potential solutions to enhance the applicability of Bifrost. On one hand, some commercial off-the-shelf CP antennas (e.g., the CP flat patch antennas [45] from L-com, Inc.) are developed to be integrated with existing WiFi APs. Bifrost can be deployed on such devices. On the other hand, in our future work, we will study how to utilize LP rather than CP signals to improve the applicability of Bifrost. To distinguish LWAs using LP signals, different phase shifts or OOK patterns may be exploited." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 594, + 560, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 560, + 681 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 560, + 681 + ], + "type": "text", + "content": "Besides, indoor obstacles may also influence the applicability of Bifrost, because the localization performance will degrade if the LoS paths between LWAs and the target are blocked by obstacles. 
Therefore, one may select proper positions to deploy LWAs to avoid NLoS propagation to the target to be localized. However, the LoS path between LWAs and the WiFi AP is not a precondition. As long as the LWAs can receive the signal from the WiFi AP, Bifrost can work." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 684, + 558, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 684, + 558, + 706 + ], + "spans": [ + { + "bbox": [ + 314, + 684, + 558, + 706 + ], + "type": "text", + "content": "Lifetime and Maintenance Cost. The rated current of LWAs is " + }, + { + "bbox": [ + 314, + 684, + 558, + 706 + ], + "type": "inline_equation", + "content": "0.86\\mathrm{mA}" + }, + { + "bbox": [ + 314, + 684, + 558, + 706 + ], + "type": "text", + "content": ". An LWA is powered with a 1600mAh battery and works" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "type": "text", + "content": "Bifrost" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 364, + 57, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 57, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 364, + 57, + 558, + 69 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "text", + "content": "386" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "type": "text", + "content": "at " + }, + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "type": "text", + "content": " duty cycle. Thus, the estimated lifetime of an LWA is over 9302 hours (" + }, + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "type": "inline_equation", + "content": "\\approx" + }, + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "type": "text", + "content": "387 days) and the maintenance cost is recharging the battery once every 387 days." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 120, + 294, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 120, + 294, + 207 + ], + "spans": [ + { + "bbox": [ + 50, + 120, + 294, + 207 + ], + "type": "text", + "content": "Potential Interference. One may be concerned that if multiple LWAs are deployed close to each other, LWAs with the same polarization will interfere with each other. However, in the setting of Bifrost, each room has only one RHCP LWA and one LHCP LWA, so LWAs with the same polarization are separated by walls. Interference signals must propagate through the wall, after which their strength is low. Therefore, different pairs of LWAs hardly interfere with each other."
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 220, + 157, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 220, + 157, + 232 + ], + "spans": [ + { + "bbox": [ + 50, + 220, + 157, + 232 + ], + "type": "text", + "content": "6 RELATED WORK" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 236, + 294, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 236, + 294, + 258 + ], + "spans": [ + { + "bbox": [ + 50, + 236, + 294, + 258 + ], + "type": "text", + "content": "In this section, we briefly summarize existing works in the fields related to our work." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 270, + 178, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 270, + 178, + 283 + ], + "spans": [ + { + "bbox": [ + 50, + 270, + 178, + 283 + ], + "type": "text", + "content": "6.1 Application of LWA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 285, + 296, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 285, + 296, + 361 + ], + "spans": [ + { + "bbox": [ + 50, + 285, + 296, + 361 + ], + "type": "text", + "content": "The work closest to ours is 123-LOC [42], which presents a THz LWA with two perpendicular slots to radiate horizontal and vertical polarized FSDM signals. Range and angle estimation is then performed by the receiver based on the bandwidth and frequencies of received signals. In comparison, Bifrost reduces the impact of multipath and achieves room-scale localization, which is a challenging task for THz signals." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 361, + 296, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 361, + 296, + 515 + ], + "spans": [ + { + "bbox": [ + 50, + 361, + 296, + 515 + ], + "type": "text", + "content": "LeakyTrack [21] tracks the object between two LWAs based on the observation that nodal and environmental motion changes the received spectral profile of FSDM signals. [76] investigates the security of THz networks with LWAs and shows that FSDM signals of the LWA can hinder eavesdroppers, e.g., by using a wide-band transmission. [20] and [22] study single-shot link discovery with the help of FSDM signals from the LWA. A receiver can discover the direction of the path from the transmitter in one shot. In contrast to those works that require a specific feeding device for THz LWA, Bifrost operates in the WiFi band and works in a plug-and-play manner, providing better applicability and convenience. Additionally, Bifrost addresses relevant challenges, including multipath, noise and ambiguity, by delicately designing the hardware and localization algorithm." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 528, + 232, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 528, + 232, + 540 + ], + "spans": [ + { + "bbox": [ + 50, + 528, + 232, + 540 + ], + "type": "text", + "content": "6.2 WiFi-based Indoor Localization" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 542, + 296, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 542, + 296, + 663 + ], + "spans": [ + { + "bbox": [ + 50, + 542, + 296, + 663 + ], + "type": "text", + "content": "There have been numerous efforts on indoor localization with WiFi [16, 49, 61, 68-70, 84]. Traditional fingerprint-based techniques have been widely used by mapping the RSS readings from multiple APs with locations [46, 66]. 
Techniques based on AoA and ToF have become more prevalent recently. For example, ArrayTrack [69] proposes an AoA-based WiFi localization system that incorporates multiple APs and the Multiple Signal Classification (MUSIC) algorithm. SpotFi [43] proposes a MUSIC algorithm to obtain AoA and ToF simultaneously. The " + }, + { + "bbox": [ + 50, + 542, + 296, + 663 + ], + "type": "inline_equation", + "content": "M^3" + }, + { + "bbox": [ + 50, + 542, + 296, + 663 + ], + "type": "text", + "content": " system [16] reduces the number of required APs to only one by utilizing multipath signals and frequency hopping among multiple channels." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 664, + 296, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 664, + 296, + 707 + ], + "spans": [ + { + "bbox": [ + 50, + 664, + 296, + 707 + ], + "type": "text", + "content": "Despite such inspiring advances, the existing proposals may disrupt the communication link between the target and the AP when the target hops between different APs or channels. In contrast, Bifrost does not interfere with the communication link, which" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 84, + 559, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 84, + 559, + 106 + ], + "spans": [ + { + "bbox": [ + 314, + 84, + 559, + 106 + ], + "type": "text", + "content": "supplements the APs' localization ability, without compromising their communication ability." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 115, + 515, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 115, + 515, + 129 + ], + "spans": [ + { + "bbox": [ + 314, + 115, + 515, + 129 + ], + "type": "text", + "content": "6.3 Polarization of the Wireless Signal" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 130, + 560, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 130, + 560, + 163 + ], + "spans": [ + { + "bbox": [ + 313, + 130, + 560, + 163 + ], + "type": "text", + "content": "LLAMA [15] designs a metasurface to mitigate polarization mismatch by rotating the polarization of wireless signals, which is achieved by applying the bias voltage to the orthogonal compo" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 167, + 560, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 167, + 560, + 278 + ], + "spans": [ + { + "bbox": [ + 313, + 167, + 560, + 278 + ], + "type": "text", + "content": "nents (like " + }, + { + "bbox": [ + 313, + 167, + 560, + 278 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E_x}" + }, + { + "bbox": [ + 313, + 167, + 560, + 278 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 167, + 560, + 278 + ], + "type": "inline_equation", + "content": "\\overrightarrow{E_y}" + }, + { + "bbox": [ + 313, + 167, + 560, + 278 + ], + "type": "text", + "content": " shown in Fig. 4) of input signals. RoS [55] and mmTag [51] propose well-designed Van Atta arrays. They all change the polarization of input mmWave signals to the orthogonal one to deal with self-interference between the incoming and backscattered signals. IntuWition [77] observes that different materials reflect and scatter incoming polarized signals in different ways, and exploits this property to classify various materials. 
SiWa [83] utilizes a similar principle to inspect the wall structure without undermining the structural integrity." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 278, + 560, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 278, + 560, + 312 + ], + "spans": [ + { + "bbox": [ + 313, + 278, + 560, + 312 + ], + "type": "text", + "content": "The above-mentioned works mainly focus on mutable LP signals. Bifrost instead explores the use of orthogonal CP signals, providing more robust performance." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 316, + 491, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 316, + 491, + 328 + ], + "spans": [ + { + "bbox": [ + 314, + 316, + 491, + 328 + ], + "type": "text", + "content": "6.4 Backscatter-aided Localization" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 331, + 560, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 331, + 560, + 462 + ], + "spans": [ + { + "bbox": [ + 313, + 331, + 560, + 462 + ], + "type": "text", + "content": "The backscatter technology [8, 26, 29-31, 37, 53, 55, 58] enables many novel applications, one of which is localization. Both Hawkeye [8] and Millimetro [58] design backscatter tags based on Van Atta arrays to enhance the energy of backscatter signals, so they can localize tags at long range (over " + }, + { + "bbox": [ + 313, + 331, + 560, + 462 + ], + "type": "inline_equation", + "content": "100\\mathrm{m}" + }, + { + "bbox": [ + 313, + 331, + 560, + 462 + ], + "type": "text", + "content": "). By assigning unique OOK modulation frequencies to different tags, those two works can also identify and localize tags simultaneously. Moreover, RFID technology [34-36, 39-41] has been widely used in localization tasks. As a typical backscatter technology, RFID can modulate information via the RFID tags. Then, the RFID reader can usually infer the range or orientation to the tags by analyzing the phase variation of the backscatter signals." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 463, + 558, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 463, + 558, + 495 + ], + "spans": [ + { + "bbox": [ + 313, + 463, + 558, + 495 + ], + "type": "text", + "content": "Compared to those works, Bifrost utilizes tags (i.e., LWAs) to create FSDM signals to localize another target, rather than the tag itself." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 504, + 414, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 504, + 414, + 516 + ], + "spans": [ + { + "bbox": [ + 315, + 504, + 414, + 516 + ], + "type": "text", + "content": "7 CONCLUSION" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 519, + 560, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 519, + 560, + 629 + ], + "spans": [ + { + "bbox": [ + 313, + 519, + 560, + 629 + ], + "type": "text", + "content": "This paper introduces Bifrost, a low-cost and plug-and-play technique to enhance the availability and accuracy of WiFi localization. It can either aid existing techniques to improve their performance, or operate independently to outperform the state of the art in arguably realistic indoor settings, without affecting ongoing data communication of WiFi networks. 
What sets Bifrost apart from other solutions is its exploration of the polarization of wireless signals and the dispersion property of LWAs, which embodies the concept of RF computing [15, 29, 53, 55]. We plan to explore the research space further in this direction." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 638, + 446, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 638, + 446, + 649 + ], + "spans": [ + { + "bbox": [ + 315, + 638, + 446, + 649 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 653, + 560, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 560, + 707 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 560, + 707 + ], + "type": "text", + "content": "We thank our anonymous shepherd and reviewers for their insightful comments. This work is partially supported by the National Natural Science Foundation of China under grant No. U21B2007, and the Guoqiang Institute of Tsinghua University under grant No. 2021GQG1002." + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "spans": [ + { + "bbox": [ + 492, + 57, + 559, + 68 + ], + "type": "text", + "content": "Yimiao Sun, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "text", + "content": "387" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 83, + 126, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 83, + 126, + 94 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 126, + 94 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 96, + 296, + 706 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 56, + 96, + 296, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 296, + 145 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 296, + 145 + ], + "type": "text", + "content": "[1] 2021. IEEE Standard for Information Technology-Telecommunications and Information Exchange between Systems Local and Metropolitan Area Networks-Specific Requirements Part 11: Wireless LAN Medium Access Control (MAC) and Physical Layer (PHY) Specifications Amendment 1: Enhancements for High-Efficiency WLAN. IEEE Std 802.11ax-2021 (Amendment to IEEE Std 802.11-2020) (2021)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 144, + 294, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 144, + 294, + 177 + ], + "spans": [ + { + "bbox": [ + 55, + 144, + 294, + 177 + ], + "type": "text", + "content": "[2] Afaz Uddin Ahmed, Reza Arablouei, Frank De Hoog, Branislav Kusy, and Raja Jurdak. 2019. Multi-radio Data Fusion for Indoor Localization Using Bluetooth and WiFi. 
In Proceedings of the 9th International Conference on Pervasive and Embedded Computing and Communication Systems: Volume 1: PECC." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 177, + 294, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 177, + 294, + 208 + ], + "spans": [ + { + "bbox": [ + 56, + 177, + 294, + 208 + ], + "type": "text", + "content": "[3] Amazon. 2023. Amazon NETGEAR 4-Stream WiFi 6 Router. https://www.amazon.com/NETGEAR-4-Stream-WiFi-Router-R6700AX/dp/B08KTXG8Q5/ref=sr_1_5?keywords=wifi+router&qid=1687784198&sr=8-5. (2023). Accessed: 2023-06-26." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 209, + 294, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 209, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 209, + 294, + 239 + ], + "type": "text", + "content": "[4] Amazon. 2023. Amazon Tenda AC1200 Smart WiFi Router. https://www.amazon.com/Tenda-Wireless-Internet-MU-MIMO-AC6/dp/B06X1CHFJ5/ref=sr_1_51?keywords=wifi+router&qid=1687784310&sr=8-51. (2023). Accessed: 2023-06-26." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 240, + 294, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 240, + 294, + 271 + ], + "spans": [ + { + "bbox": [ + 56, + 240, + 294, + 271 + ], + "type": "text", + "content": "[5] Amazon. 2023. Amazon TP-Link AC1200 WiFi Router. https://www.amazon.com/TP-Link-AC1200-Router-Archer-A54/dp/B09G5Y1HWZ/ref=sr_1_1?keywords=wifi+router&qid=1687784198&sr=8-1. (2023). Accessed: 2023-06-26." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 271, + 294, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 271, + 294, + 303 + ], + "spans": [ + { + "bbox": [ + 56, + 271, + 294, + 303 + ], + "type": "text", + "content": "[6] Amazon. 2023. Amazon TP-Link Smart WiFi 6 Router. https://www.amazon.com/TP-Link-Wireless-AX1500-Wifi-Router/dp/B07ZSDR49S/ref=sr_1_3?keywords=wifi+router&qid=1687784198&sr=8-3. (2023). Accessed: 2023-06-26." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 303, + 294, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 303, + 294, + 343 + ], + "spans": [ + { + "bbox": [ + 56, + 303, + 294, + 343 + ], + "type": "text", + "content": "[7] Roshan Ayyalasomayajula, Aditya Arun, Chenfeng Wu, Sanatan Sharma, Abhishek Rajkumar Sethi, Deepak Vasisht, and Dinesh Bharadia. 2020. Deep Learning Based Wireless Localization for Indoor Navigation. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 344, + 294, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 344, + 294, + 376 + ], + "spans": [ + { + "bbox": [ + 56, + 344, + 294, + 376 + ], + "type": "text", + "content": "[8] Kang Min Bae, Hankyeol Moon, Sung-Min Sohn, and Song Min Kim. 2023. 
Hawkeye: Hectometer-range Subcentimeter Localization for Large-scale mmWave Backscatter. In Proceedings of the 21st Annual International Conference on Mobile Systems, Applications and Services (MobiSys)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 376, + 294, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 376, + 294, + 407 + ], + "spans": [ + { + "bbox": [ + 56, + 376, + 294, + 407 + ], + "type": "text", + "content": "[9] Atul Bansal, Akshay Gadre, Vaibhav Singh, Anthony Rowe, Bob Iannucci, and Swarun Kumar. 2021. Owll: Accurate LoRa Localization Using the TV Whitespaces. In Proceedings of the 20th International Conference on Information Processing in Sensor Networks (IPSN)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 407, + 294, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 407, + 294, + 432 + ], + "spans": [ + { + "bbox": [ + 53, + 407, + 294, + 432 + ], + "type": "text", + "content": "[10] Yuanxi Cao and Sen Yan. 2021. A Low-profile High-gain Multi-beam Antenna based on 3D-printed Cylindrical Luneburg Lens. Microwave and Optical Technology Letters 63, 7 (2021)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 432, + 294, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 432, + 294, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 432, + 294, + 456 + ], + "type": "text", + "content": "[11] Yuanxi Cao and Sen Yan. 2021. Multi-beam SIW Leaky-wave Antenna with 2-D Beam Scanning Capability for Millimeter-wave Radar Applications. International Journal of RF and Microwave Computer-aided Engineering 31, 5 (2021)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 456, + 294, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 456, + 294, + 480 + ], + "spans": [ + { + "bbox": [ + 53, + 456, + 294, + 480 + ], + "type": "text", + "content": "[12] Yuanxi Cao, Sen Yan, and Juan Chen. 2023. An SIW Pillbox-based Compact Dual-polarized Multibeam Antenna with Passive 2-D Beam Scanning Capability. IEEE Transactions on Circuits and Systems II: Express Briefs 70, 1 (2023)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 481, + 294, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 481, + 294, + 505 + ], + "spans": [ + { + "bbox": [ + 53, + 481, + 294, + 505 + ], + "type": "text", + "content": "[13] Yuanxi Cao, Sen Yan, Wendong Liu, and Jianxing Li. 2023. A Wideband Multibeam Pillbox Antenna Based on Differentially Fed Leaky-wave Array. IEEE Antennas and Wireless Propagation Letters 22, 3 (2023)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 505, + 294, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 505, + 294, + 529 + ], + "spans": [ + { + "bbox": [ + 53, + 505, + 294, + 529 + ], + "type": "text", + "content": "[14] Roberto Carvalho, Shan-Ho Yang, Yao-Hua Ho, and Ling-Jyh Chen. [n.d.]. Indoor Localization Using FM and DVB-T Signals. In Proceedings of the 2016 13th IEEE Annual Consumer Communications & Networking Conference (CCNC)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 529, + 294, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 529, + 294, + 561 + ], + "spans": [ + { + "bbox": [ + 53, + 529, + 294, + 561 + ], + "type": "text", + "content": "[15] Lili Chen, Wenjun Hu, Kyle Jamieson, Xiaojiang Chen, Dingyi Fang, and Jeremy Gummeson. [n. d.]. 
Pushing the Physical Limits of IoT Devices with Programmable Metasurfaces. In Proceedings of the 18th USENIX Symposium on Networked Systems Design and Implementation (NSDI)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 53, + 561, + 294, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 561, + 294, + 585 + ], + "spans": [ + { + "bbox": [ + 53, + 561, + 294, + 585 + ], + "type": "text", + "content": "[16] Zhe Chen, Guorong Zhu, Sulei Wang, Yuedong Xu, Jie Xiong, Jin Zhao, Jun Luo, and Xin Wang. 2019. " + }, + { + "bbox": [ + 53, + 561, + 294, + 585 + ], + "type": "inline_equation", + "content": "M^3" + }, + { + "bbox": [ + 53, + 561, + 294, + 585 + ], + "type": "text", + "content": ": Multipath Assisted Wi-Fi Localization with a Single Access Point. IEEE Transactions on Mobile Computing 20, 2 (2019)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 585, + 294, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 585, + 294, + 601 + ], + "spans": [ + { + "bbox": [ + 53, + 585, + 294, + 601 + ], + "type": "text", + "content": "[17] COMFAST. 2023. CF-AX210 PRO. http://www.comfast.com.cn/index.php?m=content&c=index&a=show&catid=13&id=123. (2023). Accessed: 2023-03-17." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 53, + 601, + 294, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 601, + 294, + 625 + ], + "spans": [ + { + "bbox": [ + 53, + 601, + 294, + 625 + ], + "type": "text", + "content": "[18] Pei Du and Nirupama Bulusu. 2021. An Automated AR-based Annotation Tool for Indoor Navigation for Visually Impaired People. In Proceedings of the 23rd International ACM SIGACCESS Conference on Computers and Accessibility." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 53, + 625, + 294, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 625, + 294, + 649 + ], + "spans": [ + { + "bbox": [ + 53, + 625, + 294, + 649 + ], + "type": "text", + "content": "[19] Pei Du and Nirupama Bulusu. 2022. Indoor Navigation for Visually Impaired People with Vertex Colored Graphs. In Proceedings of the 20th Annual International Conference on Mobile Systems, Applications and Services (MobiSys)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 53, + 649, + 294, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 649, + 294, + 673 + ], + "spans": [ + { + "bbox": [ + 53, + 649, + 294, + 673 + ], + "type": "text", + "content": "[20] Yasaman Ghasempour, Rabi Shrestha, Aaron Charous, Edward Knightly, and Daniel M Mittleman. 2020. Single-shot Link Discovery for Terahertz Wireless Networks. Nature Communications 11, 1 (2020)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 53, + 673, + 294, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 673, + 294, + 706 + ], + "spans": [ + { + "bbox": [ + 53, + 673, + 294, + 706 + ], + "type": "text", + "content": "[21] Yasaman Ghasempour, Chia-Yi Yeh, Rabi Shrestha, Yasith Amarasinghe, Daniel Mittleman, and Edward W. Knightly. 2020. LeakyTrack: Non-coherent Single-antenna Nodal and Environmental Mobility Tracking with a Leaky-wave Antenna. 
In Proceedings of the 18th Conference on Embedded Networked Sensor Systems" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 87, + 558, + 703 + ], + "type": "list", + "angle": 0, + "index": 48, + "blocks": [ + { + "bbox": [ + 331, + 87, + 359, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 87, + 359, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 87, + 359, + 95 + ], + "type": "text", + "content": "(SenSys)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 95, + 558, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 95, + 558, + 126 + ], + "spans": [ + { + "bbox": [ + 317, + 95, + 558, + 126 + ], + "type": "text", + "content": "[22] Yasaman Ghasempour, Chia-Yi Yeh, Rabi Shrestha, Daniel Mittleman, and Edward Knightly. 2020. Single Shot Single Antenna Path Discovery in THz Networks. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 126, + 558, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 126, + 558, + 150 + ], + "spans": [ + { + "bbox": [ + 317, + 126, + 558, + 150 + ], + "type": "text", + "content": "[23] Jon Gjengset, Jie Xiong, Graeme McPhillips, and Kyle Jamieson. 2014. Accurate Indoor Localization with Zero Start-up Cost. In Proceedings of the 20th Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 150, + 558, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 150, + 558, + 182 + ], + "spans": [ + { + "bbox": [ + 317, + 150, + 558, + 182 + ], + "type": "text", + "content": "[24] Jon Gjengset, Jie Xiong, Graeme McPhillips, and Kyle Jamieson. 2014. Phaser: Enabling Phased Array Signal Processing on Commodity WiFi Access Points. In Proceedings of the 20th Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 182, + 558, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 182, + 558, + 215 + ], + "spans": [ + { + "bbox": [ + 317, + 182, + 558, + 215 + ], + "type": "text", + "content": "[25] Baoshen Guo, Weijian Zuo, Shuai Wang, Wenjun Lyu, Zhiqing Hong, Yi Ding, Tian He, and Desheng Zhang. 2022. Wepos: Weak-supervised Indoor Positioning with Unlabeled WiFi for On-demand Delivery. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 6, 2 (2022), 1-25." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 215, + 558, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 215, + 558, + 247 + ], + "spans": [ + { + "bbox": [ + 317, + 215, + 558, + 247 + ], + "type": "text", + "content": "[26] Xiuzhen Guo, Yuan He, Zihao Yu, Jiacheng Zhang, Yunhao Liu, and Longfei Shangguan. 2022. RF-transformer: A Unified Backscatter Radio Hardware Abstraction. In Proceedings of the 28th Annual International Conference on Mobile Computing And Networking (MobiCom)." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 247, + 558, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 247, + 558, + 278 + ], + "spans": [ + { + "bbox": [ + 317, + 247, + 558, + 278 + ], + "type": "text", + "content": "[27] Xiuzhen Guo, Yuan He, Xiaolong Zheng, Liangcheng Yu, and Omprakash Gnawali. 2018. 
ZigFi: Harnessing Channel State Information for Cross-Technology Communication. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 278, + 558, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 278, + 558, + 303 + ], + "spans": [ + { + "bbox": [ + 317, + 278, + 558, + 303 + ], + "type": "text", + "content": "[28] Xiuzhen Guo, Yuan He, Xiaolong Zheng, Liangcheng Yu, and Omprakash Gnawali. 2020. ZigFi: Harnessing Channel State Information for Cross-Technology Communication. IEEE/ACM Transactions on Networking 28, 1 (2020), 301–311." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 303, + 558, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 303, + 558, + 334 + ], + "spans": [ + { + "bbox": [ + 317, + 303, + 558, + 334 + ], + "type": "text", + "content": "[29] Xiuzhen Guo, Longfei Shangguan, Yuan He, Nan Jing, Jiacheng Zhang, Haotian Jiang, and Yunhao Liu. 2022. Saiyan: Design and Implementation of a Low-power Demodulator for LoRa Backscatter Systems. In Proceedings of the 19th USENIX Symposium on Networked Systems Design and Implementation (NSDI)." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 334, + 558, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 334, + 558, + 366 + ], + "spans": [ + { + "bbox": [ + 317, + 334, + 558, + 366 + ], + "type": "text", + "content": "[30] Xiuzhen Guo, Longfei Shangguan, Yuan He, Jia Zhang, Haotian Jiang, Awais Ahmad Siddiqi, and Yunhao Liu. 2020. Aloba: Rethinking ON-OFF Keying Modulation for Ambient LoRa Backscatter. In Proceedings of the 18th Conference on Embedded Networked Sensor Systems (SenSys)." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 317, + 366, + 558, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 366, + 558, + 398 + ], + "spans": [ + { + "bbox": [ + 317, + 366, + 558, + 398 + ], + "type": "text", + "content": "[31] Xiuzhen Guo, Longfei Shangguan, Yuan He, Jia Zhang, Haotian Jiang, Awais Ahmad Siddiqi, and Yunhao Liu. 2021. Efficient Ambient LoRa Backscatter with On-Off Keying Modulation. IEEE/ACM Transactions on Networking 30, 2 (2021), 641-654." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 398, + 558, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 398, + 558, + 422 + ], + "spans": [ + { + "bbox": [ + 317, + 398, + 558, + 422 + ], + "type": "text", + "content": "[32] Yuan He, Weiguo Wang, Luca Mottola, Shuai Li, Yimiao Sun, Jinming Li, Hua Jing, Ting Wang, and Yulei Wang. 2023. Acoustic Localization System for Precise Drone Landing. IEEE Transactions on Mobile Computing (2023)." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 317, + 422, + 558, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 422, + 558, + 437 + ], + "spans": [ + { + "bbox": [ + 317, + 422, + 558, + 437 + ], + "type": "text", + "content": "[33] David R Jackson, Christophe Caloz, and Tatsuo Itoh. 2012. Leaky-wave Antennas. Proc. IEEE 100, 7 (2012)." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 437, + 558, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 437, + 558, + 461 + ], + "spans": [ + { + "bbox": [ + 317, + 437, + 558, + 461 + ], + "type": "text", + "content": "[34] Chengkun Jiang, Yuan He, Songzhen Yang, Junchen Guo, and Yunhao Liu. 2019. 
3D-OmniTrack: 3D Tracking with COTS RFID Systems. In Proceedings of the 18th International Conference on Information Processing in Sensor Networks (IPSN)." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 317, + 461, + 558, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 461, + 558, + 486 + ], + "spans": [ + { + "bbox": [ + 317, + 461, + 558, + 486 + ], + "type": "text", + "content": "[35] Chengkun Jiang, Yuan He, Xiaolong Zheng, and Yunhao Liu. 2018. Orientation-aware RFID Tracking with Centimeter-level Accuracy. In Proceedings of the 17th International Conference on Information Processing in Sensor Networks (IPSN)." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 317, + 486, + 558, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 486, + 558, + 510 + ], + "spans": [ + { + "bbox": [ + 317, + 486, + 558, + 510 + ], + "type": "text", + "content": "[36] Chengkun Jiang, Yuan He, Xiaolong Zheng, and Yunhao Liu. 2019. OmniTrack: Orientation-aware RFID Tracking with Centimeter-level Accuracy. IEEE Transactions on Mobile Computing 20, 2 (2019), 634-646." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 317, + 510, + 558, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 510, + 558, + 535 + ], + "spans": [ + { + "bbox": [ + 317, + 510, + 558, + 535 + ], + "type": "text", + "content": "[37] Haotian Jiang, Jiacheng Zhang, Xiuzhen Guo, and Yuan He. 2021. Sense Me on the Ride: Accurate Mobile Sensing Over a LoRa Backscatter Channel. In Proceedings of the 19th ACM Conference on Embedded Networked Sensor Systems (SenSys)." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 317, + 535, + 558, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 535, + 558, + 567 + ], + "spans": [ + { + "bbox": [ + 317, + 535, + 558, + 567 + ], + "type": "text", + "content": "[38] Zhiping Jiang, Tom H. Luan, Xincheng Ren, Dongtao Lv, Han Hao, Jing Wang, Kun Zhao, Wei Xi, Yueshen Xu, and Rui Li. 2022. Eliminating the Barriers: Demystifying Wi-Fi Baseband Design and Introducing the PicoScenes Wi-Fi Sensing Platform. IEEE Internet of Things Journal 9, 6 (2022)." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 317, + 567, + 558, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 567, + 558, + 591 + ], + "spans": [ + { + "bbox": [ + 317, + 567, + 558, + 591 + ], + "type": "text", + "content": "[39] Meng Jin, Yuan He, Songzhen Yang, Yunhao Liu, Li Yan, and Yuji Sun. 2022. Versatile RFID-based Sensing: Model, Algorithm, and Applications. IEEE Transactions on Mobile Computing (2022)." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 317, + 591, + 558, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 591, + 558, + 616 + ], + "spans": [ + { + "bbox": [ + 317, + 591, + 558, + 616 + ], + "type": "text", + "content": "[40] Meng Jin, Kexin Li, Xiaohua Tian, Xinbing Wang, and Chenghu Zhou. 2023. Fast, Fine-grained, and Robust Grouping of RFIDs. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking (MobiCom)." 
+ } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 317, + 616, + 558, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 616, + 558, + 647 + ], + "spans": [ + { + "bbox": [ + 317, + 616, + 558, + 647 + ], + "type": "text", + "content": "[41] Meng Jin, Shun Yao, Kexin Li, Xiaohua Tian, Xinbing Wang, Chenghu Zhou, and Xinde Cao. 2022. A Passive Eye-in-Hand "Camera" for Miniature Robots. In Proceedings of the 20th ACM Conference on Embedded Networked Sensor Systems (SenSys)." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 317, + 647, + 558, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 647, + 558, + 679 + ], + "spans": [ + { + "bbox": [ + 317, + 647, + 558, + 679 + ], + "type": "text", + "content": "[42] Atsutse Kludze, Rabi Shrestha, Chowdhury Miftah, Edward Knightly, Daniel Mittleman, and Yasaman Ghasempour. 2022. Quasi-optical 3D Localization Using Asymmetric Signatures above 100 GHz. In Proceedings of the 28th Annual International Conference on Mobile Computing And Networking (MobiCom)." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 317, + 679, + 558, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 679, + 558, + 703 + ], + "spans": [ + { + "bbox": [ + 317, + 679, + 558, + 703 + ], + "type": "text", + "content": "[43] Manikanta Kotaru, Kiran Joshi, Dinesh Bharadia, and Sachin Katti. 2015. SpotFi: Decimeter Level Localization Using WiFi. In Proceedings of the 2015 ACM Conference on Special Interest Group on Data Communication (SIGCOMM)." + } + ] + } + ], + "index": 47 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 84, + 68 + ], + "type": "text", + "content": "Bifrost" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 364, + 57, + 558, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "text", + "content": "388" + } + ] + } + ], + "index": 49 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 86, + 296, + 685 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 52, + 86, + 296, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 86, + 296, + 118 + ], + "spans": [ + { + "bbox": [ + 52, + 86, + 296, + 118 + ], + "type": "text", + "content": "[44] Vikram Kumar, Reza Arablouei, Raja Jurdak, Branislav Kusy, and Neil W Bergmann. 2017. RSSI-based Self-localization with Perturbed Anchor Positions. In Proceedings of the 2017 IEEE 28th Annual International Symposium on Personal, Indoor, and Mobile Radio Communications (PIMRC)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 118, + 295, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 118, + 295, + 142 + ], + "spans": [ + { + "bbox": [ + 52, + 118, + 295, + 142 + ], + "type": "text", + "content": "[45] L-com. 2023. 
Circular Polarized Patch Antenna. https://www.l-com.com/wireless-antenna-24-ghz-8-dbi-circular-polarized-rh-flat-patch-antennas. (2023). Accessed: 2023-10-03." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 142, + 295, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 142, + 295, + 166 + ], + "spans": [ + { + "bbox": [ + 52, + 142, + 295, + 166 + ], + "type": "text", + "content": "[46] Danyang Li, Jingao Xu, Zheng Yang, Chenshu Wu, Jianbo Li, and Nicholas D Lane. 2021. Wireless Localization with Spatial-temporal Robust Fingerprints. ACM Transactions on Sensor Networks 18, 1 (2021), 1-23." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 166, + 294, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 166, + 294, + 190 + ], + "spans": [ + { + "bbox": [ + 52, + 166, + 294, + 190 + ], + "type": "text", + "content": "[47] Tianxiang Li, Haofan Lu, Reza Rezvani, Ali Abedi, and Omid Abari. 2022. Bringing WiFi Localization to Any WiFi Devices. In Proceedings of the 21st ACM Workshop on Hot Topics in Networks (HotNets)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 190, + 295, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 190, + 295, + 223 + ], + "spans": [ + { + "bbox": [ + 52, + 190, + 295, + 223 + ], + "type": "text", + "content": "[48] Tianxiang Li, Mohammad Hossein Mazaheri, and Omid Abari. 2022. 5g in the Sky: The Future of High-speed Internet via Unmanned Aerial Vehicles. In Proceedings of the 23rd Annual International Workshop on Mobile Computing Systems and Applications." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 223, + 294, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 223, + 294, + 255 + ], + "spans": [ + { + "bbox": [ + 52, + 223, + 294, + 255 + ], + "type": "text", + "content": "[49] Xiang Li, Daqing Zhang, Qin Lv, Jie Xiong, Shengjie Li, Yue Zhang, and Hong Mei. 2017. IndoTrack: DeviceFree Indoor Human Tracking with Commodity Wi-Fi. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 3 (2017)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 255, + 295, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 255, + 295, + 295 + ], + "spans": [ + { + "bbox": [ + 52, + 255, + 295, + 295 + ], + "type": "text", + "content": "[50] Bo Liang, Purui Wang, Renjie Zhao, Heyu Guo, Pengyu Zhang, Junchen Guo, Shunmin Zhu, Hongqiang Harry Liu, Xinyu Zhang, and Chenren Xu. 2023. RF-Chord: Towards Deployable RFID Localization System for Logistic Networks. In Proceedings of the 20th USENIX Symposium on Networked Systems Design and Implementation (NSDI)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 294, + 294, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 294, + 294, + 318 + ], + "spans": [ + { + "bbox": [ + 52, + 294, + 294, + 318 + ], + "type": "text", + "content": "[51] Mohammad Hossein Mazaheri, Alex Chen, and Omid Abari. 2021. mmTag: A Millimeter Wave Backscatter Network. In Proceedings of the 2021 ACM SIGCOMM 2021 Conference (SIGCOMM)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 318, + 294, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 318, + 294, + 342 + ], + "spans": [ + { + "bbox": [ + 52, + 318, + 294, + 342 + ], + "type": "text", + "content": "[52] Francesco Monticone and Andrea Alu. 2015. 
Leaky-wave Theory, Techniques, and Applications: From Microwaves to Visible Frequencies. Proc. IEEE 103, 5 (2015)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 342, + 295, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 342, + 295, + 373 + ], + "spans": [ + { + "bbox": [ + 52, + 342, + 295, + 373 + ], + "type": "text", + "content": "[53] Xin Na, Xiuzhen Guo, Zihao Yu, Jia Zhang, Yuan He, and Yunhao Liu. 2023. Leggiero: Analog WiFi Backscatter with Payload Transparency. In Proceedings of the 21st Annual International Conference on Mobile Systems, Applications and Services (MobiSys)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 373, + 294, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 373, + 294, + 413 + ], + "spans": [ + { + "bbox": [ + 52, + 373, + 294, + 413 + ], + "type": "text", + "content": "[54] Sujay Narayana, Vijay Rao, R Venkatesha Prasad, Ajay K Kanthila, Kavya Managundi, Luca Mottola, and T Venkata Prabhakar. 2020. LOCI: Privacy-aware, Device-free, Low-power Localization of Multiple Persons Using IR Sensors. In Proceedings of the 19th International Conference on Information Processing in Sensor Networks (IPSN)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 413, + 294, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 413, + 294, + 437 + ], + "spans": [ + { + "bbox": [ + 52, + 413, + 294, + 437 + ], + "type": "text", + "content": "[55] John Nolan, Kun Qian, and Xinyu Zhang. 2021. RoS: Passive Smart Surface for Roadside-to-Vehicle Communication. In Proceedings of the 2021 ACM SIGCOMM 2021 Conference (SIGCOMM)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 437, + 294, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 437, + 294, + 469 + ], + "spans": [ + { + "bbox": [ + 52, + 437, + 294, + 469 + ], + "type": "text", + "content": "[56] Yuanchao Shu, Zhuqi Li, Borje Karlsson, Yiyong Lin, Thomas Moscibroda, and Kang Shin. [n. d.]. Incrementally-deployable Indoor Navigation with Automatic Trace Generation. In Proceedings of IEEE International Conference on Computer Communications." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 469, + 294, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 469, + 294, + 502 + ], + "spans": [ + { + "bbox": [ + 52, + 469, + 294, + 502 + ], + "type": "text", + "content": "[57] Elahe Soltanaghaei, Avinash Kalyanaraman, and Kamin Whitehouse. 2018. Multipath Triangulation: Decimeter-level WiFi Localization and Orientation with a Single Unaided Receiver. In Proceedings of the 16th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 502, + 294, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 502, + 294, + 541 + ], + "spans": [ + { + "bbox": [ + 52, + 502, + 294, + 541 + ], + "type": "text", + "content": "[58] Elahe Soltanaghaei, Akarsh Prabhakara, Artur Balanuta, Matthew Anderson, Jan M Rabaey, Swarun Kumar, and Anthony Rowe. 2021. Millimetro: mmWave Retro-reflective Tags for Accurate, Long Range Localization. In Proceedings of the 27th Annual International Conference on Mobile Computing and Networking (MobiCom)."
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 541, + 294, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 541, + 294, + 573 + ], + "spans": [ + { + "bbox": [ + 52, + 541, + 294, + 573 + ], + "type": "text", + "content": "[59] Yimiao Sun, Weiguo Wang, Luca Mottola, Ruijin Wang, and Yuan He. 2022. AIM: Acoustic Inertial Measurement for Indoor Drone Localization and Tracking. In Proceedings of the 20th Conference on Embedded Networked Sensor Systems (SenSys)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 52, + 573, + 294, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 573, + 294, + 605 + ], + "spans": [ + { + "bbox": [ + 52, + 573, + 294, + 605 + ], + "type": "text", + "content": "[60] Huy Tran, Abhishek Mukherji, Nirupama Bulusu, Santosh Pandey, and Xu Zhang. 2019. Improving Infrastructure-based Indoor Positioning Systems with Device Motion Detection. In Proceedings of the 2019 IEEE International Conference on Pervasive Computing and Communications (PerCom)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 52, + 605, + 294, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 605, + 294, + 638 + ], + "spans": [ + { + "bbox": [ + 52, + 605, + 294, + 638 + ], + "type": "text", + "content": "[61] Ju Wang, Hongbo Jiang, Jie Xiong, Kyle Jamieson, Xiaojiang Chen, Dingyi Fang, and Binbin Xie. 2016. LiFS: Low Human-effort, Device-free Localization with Fine-grained Subcarrier Information. In Proceedings of the 22nd Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 52, + 638, + 294, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 638, + 294, + 669 + ], + "spans": [ + { + "bbox": [ + 52, + 638, + 294, + 669 + ], + "type": "text", + "content": "[62] Weiguo Wang, Yuan He, Meng Jin, Yimiao Sun, and Xiuzhen Guo. 2023. Meta-Speaker: Acoustic Source Projection by Exploiting Air Nonlinearity. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 52, + 669, + 294, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 669, + 294, + 685 + ], + "spans": [ + { + "bbox": [ + 52, + 669, + 294, + 685 + ], + "type": "text", + "content": "[63] Weiguo Wang, Luca Mottola, Yuan He, Jinming Li, Yimiao Sun, Shuai Li, Hua Jing, and Yulei Wang. 2022. MicNest: Long-range Instant Acoustic Localization" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 86, + 558, + 663 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 331, + 86, + 558, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 86, + 558, + 102 + ], + "spans": [ + { + "bbox": [ + 331, + 86, + 558, + 102 + ], + "type": "text", + "content": "of Drones in Precise Landing. In Proceedings of the 20th Conference on Embedded Networked Sensor Systems (SenSys)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 102, + 558, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 102, + 558, + 126 + ], + "spans": [ + { + "bbox": [ + 317, + 102, + 558, + 126 + ], + "type": "text", + "content": "[64] Yongyong Wei and Rong Zheng. 2020. Handling Device Heterogeneity in Wi-Fi Based Indoor Positioning Systems. 
In Proceedings of IEEE International Conference on Computer Communications (INFOCOM)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 126, + 558, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 126, + 558, + 143 + ], + "spans": [ + { + "bbox": [ + 317, + 126, + 558, + 143 + ], + "type": "text", + "content": "[65] Yongyong Wei and Rong Zheng. 2021. Efficient Wi-Fi Fingerprint Crowdsourcing for Indoor Localization. IEEE Sensors Journal 22, 6 (2021), 5055-5062." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 143, + 558, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 143, + 558, + 175 + ], + "spans": [ + { + "bbox": [ + 317, + 143, + 558, + 175 + ], + "type": "text", + "content": "[66] Chenshu Wu, Jingao Xu, Zheng Yang, Nicholas D Lane, and Zuwei Yin. 2017. Gain without Pain: Accurate WiFi-based Localization Using Fingerprint Spatial Gradient. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 2 (2017), 1-19." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 175, + 558, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 175, + 558, + 199 + ], + "spans": [ + { + "bbox": [ + 317, + 175, + 558, + 199 + ], + "type": "text", + "content": "[67] Chenshu Wu, Zheng Yang, Zimu Zhou, Yunhao Liu, and Mingyan Liu. 2016. Mitigating Large Errors in WiFi-based Indoor Localization for Smartphones. IEEE Transactions on Vehicular Technology 66, 7 (2016), 6246-6257." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 199, + 558, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 199, + 558, + 223 + ], + "spans": [ + { + "bbox": [ + 317, + 199, + 558, + 223 + ], + "type": "text", + "content": "[68] Yaxiong Xie, Jie Xiong, Mo Li, and Kyle Jamieson. 2019. md-Track: Leveraging Multi-dimensionality for Passive Indoor Wi-Fi Tracking. In Proceedings of the 25th Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 223, + 558, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 223, + 558, + 247 + ], + "spans": [ + { + "bbox": [ + 317, + 223, + 558, + 247 + ], + "type": "text", + "content": "[69] Jie Xiong and Kyle Jamieson. 2013. ArrayTrack: A Fine-grained Indoor Location System. In Proceedings of the 10th USENIX Symposium on Networked Systems Design and Implementation (NSDI)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 247, + 558, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 247, + 558, + 279 + ], + "spans": [ + { + "bbox": [ + 317, + 247, + 558, + 279 + ], + "type": "text", + "content": "[70] Jie Xiong, Karthikeyan Sundaresan, and Kyle Jamieson. 2015. ToneTrack: Leveraging Frequency-agile Radios for Time-based Indoor Wireless Localization. In Proceedings of the 21st Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 279, + 558, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 279, + 558, + 295 + ], + "spans": [ + { + "bbox": [ + 317, + 279, + 558, + 295 + ], + "type": "text", + "content": "[71] Feng Xu and Ke Wu. 2013. Understanding Leaky-wave Structures: A Special Form of Guided-wave Structure. IEEE Microwave Magazine 14, 5 (2013)." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 295, + 558, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 295, + 558, + 319 + ], + "spans": [ + { + "bbox": [ + 317, + 295, + 558, + 319 + ], + "type": "text", + "content": "[72] Han Xu, Zheng Yang, Zimu Zhou, Ke Yi, and Chunyi Peng. [n. d.]. Tum: Towards Ubiquitous Multi-device Localization for Cross-device Interaction. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM)." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 319, + 558, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 319, + 558, + 352 + ], + "spans": [ + { + "bbox": [ + 317, + 319, + 558, + 352 + ], + "type": "text", + "content": "[73] Kun Yang, Xiaolong Zheng, Jie Xiong, Liang Liu, and Huadong Ma. 2022. WiImg: Pushing the Limit of WiFi Sensing with Low Transmission Rates. In Proceedings of the 19th Annual IEEE International Conference on Sensing, Communication, and Networking (SECON)." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 352, + 558, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 352, + 558, + 384 + ], + "spans": [ + { + "bbox": [ + 317, + 352, + 558, + 384 + ], + "type": "text", + "content": "[74] Yu Yang, Yi Ding, Dengpan Yuan, Guang Wang, Xiaoyang Xie, Yunhuai Liu, Tian He, and Desheng Zhang. 2020. TransLoc: Transparent Indoor Localization with Uncertain Human Participation for Instant Delivery. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 317, + 383, + 558, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 383, + 558, + 399 + ], + "spans": [ + { + "bbox": [ + 317, + 383, + 558, + 399 + ], + "type": "text", + "content": "[75] Zheng Yang, Zimu Zhou, and Yunhao Liu. 2013. From RSSI to CSI: Indoor Localization via Channel Response. Comput. Surveys 46, 2 (2013)." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 399, + 558, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 399, + 558, + 431 + ], + "spans": [ + { + "bbox": [ + 317, + 399, + 558, + 431 + ], + "type": "text", + "content": "[76] Chia-Yi Yeh, Yasaman Ghasempour, Yasith Amarasinghe, Daniel M Mittleman, and Edward W Knightly. 2020. Security in Terahertz WLANs with Leaky Wave Antennas. In Proceedings of the 13th ACM Conference on Security and Privacy in Wireless and Mobile Networks (WiSec)." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 317, + 431, + 558, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 431, + 558, + 462 + ], + "spans": [ + { + "bbox": [ + 317, + 431, + 558, + 462 + ], + "type": "text", + "content": "[77] Diana Zhang, Jingxian Wang, Junsu Jang, Junbo Zhang, and Swarun Kumar. 2019. On the Feasibility of Wi-Fi Based Material Sensing. In Proceedings of the 25th Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 462, + 558, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 462, + 558, + 487 + ], + "spans": [ + { + "bbox": [ + 317, + 462, + 558, + 487 + ], + "type": "text", + "content": "[78] Jia Zhang, Xiuzhen Guo, Haotian Jiang, Xiaolong Zheng, and Yuan He. 2020. Link Quality Estimation of Cross-technology Communication.
In Proceedings of IEEE International Conference on Computer Communications (INFOCOM). 496-505." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 317, + 487, + 558, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 487, + 558, + 519 + ], + "spans": [ + { + "bbox": [ + 317, + 487, + 558, + 519 + ], + "type": "text", + "content": "[79] Jia Zhang, Xin Na, Rui Xi, Yimiao Sun, and Yuan He. 2023. mmHawkeye: Passive UAV Detection with a COTS mmWave Radar. In Proceedings of the 20th Annual IEEE International Conference on Sensing, Communication, and Networking (SECON)." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 317, + 519, + 558, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 519, + 558, + 552 + ], + "spans": [ + { + "bbox": [ + 317, + 519, + 558, + 552 + ], + "type": "text", + "content": "[80] Jia Zhang, Rui Xi, Yuan He, Yimiao Sun, Xiuzhen Guo, Weiguo Wang, Xin Na, Yunhao Liu, Zhenguo Shi, and Tao Gu. 2023. A Survey of mmWave-based Human Sensing: Technology, Platforms and Applications. IEEE Communications Surveys & Tutorials (2023)." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 317, + 552, + 558, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 552, + 558, + 582 + ], + "spans": [ + { + "bbox": [ + 317, + 552, + 558, + 582 + ], + "type": "text", + "content": "[81] Xianan Zhang, Wei Wang, Xuedou Xiao, Hang Yang, Xinyu Zhang, and Tao Jiang. 2020. Peer-to-Peer Localization for Single-antenna Devices. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 4, 3 (2020), 1-25." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 317, + 582, + 558, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 582, + 558, + 607 + ], + "spans": [ + { + "bbox": [ + 317, + 582, + 558, + 607 + ], + "type": "text", + "content": "[82] Zhenyong Zhang, Shibo He, Yuanchao Shu, and Zhiguo Shi. 2019. A Self-evolving WiFi-based Indoor Navigation System Using Smartphones. IEEE Transactions on Mobile Computing 19, 8 (2019), 1760-1774." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 317, + 607, + 558, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 607, + 558, + 632 + ], + "spans": [ + { + "bbox": [ + 317, + 607, + 558, + 632 + ], + "type": "text", + "content": "[83] Tianyue Zheng, Zhe Chen, Jun Luo, Lin Ke, Chaoyang Zhao, and Yaowen Yang. 2021. SiWa: See into Walls via Deep UWB Radar. In Proceedings of the 27th Annual International Conference on Mobile Computing and Networking (MobiCom)." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 317, + 632, + 558, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 632, + 558, + 663 + ], + "spans": [ + { + "bbox": [ + 317, + 632, + 558, + 663 + ], + "type": "text", + "content": "[84] Xiaolong Zheng, Jiliang Wang, Longfei Shangguan, Zimu Zhou, and Yunhao Liu. 2016. Smokey: Ubiquitous Smoking Detection with Commercial WiFi Infrastructures. In Proceedings of IEEE International Conference on Computer Communications (INFOCOM)."
+ } + ] + } + ], + "index": 44 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 246, + 68 + ], + "type": "text", + "content": "SenSys '23, November 12-17, 2023, Istanbul, Türkiye" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 493, + 57, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 493, + 57, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 493, + 57, + 558, + 68 + ], + "type": "text", + "content": "Yimiao Sun, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "spans": [ + { + "bbox": [ + 298, + 731, + 315, + 740 + ], + "type": "text", + "content": "389" + } + ] + } + ], + "index": 46 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file